From c004c94c4588e30d8decfea7faa21f7f44429b8d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Feb 2024 23:40:38 +0000
Subject: [PATCH] Bump google.golang.org/api from 0.70.0 to 0.167.0 in
 /event-exporter

Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.70.0 to 0.167.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.70.0...v0.167.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 event-exporter/go.mod | 49 +-
 event-exporter/go.sum | 214 +-
 .../go/compute/internal/version.go | 18 +
 .../go/compute/metadata/CHANGES.md | 19 +
 .../go/compute/metadata}/LICENSE | 0
 .../go/compute/metadata/README.md | 27 +
 .../go/compute/metadata/metadata.go | 6 +-
 .../go/compute/metadata/tidyfix.go | 23 +
 .../github.com/cespare/xxhash/v2/README.md | 31 +-
 .../github.com/cespare/xxhash/v2/testall.sh | 10 +
 .../github.com/cespare/xxhash/v2/xxhash.go | 47 +-
 .../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
 .../cespare/xxhash/v2/xxhash_arm64.s | 183 +
 .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
 .../cespare/xxhash/v2/xxhash_other.go | 22 +-
 .../cespare/xxhash/v2/xxhash_safe.go | 1 +
 .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
 .../github.com/felixge/httpsnoop/.gitignore | 0
 .../github.com/felixge/httpsnoop/LICENSE.txt | 19 +
 .../github.com/felixge/httpsnoop/Makefile | 10 +
 .../github.com/felixge/httpsnoop/README.md | 95 +
 .../felixge/httpsnoop/capture_metrics.go | 86 +
 .../github.com/felixge/httpsnoop/docs.go | 10 +
 .../httpsnoop/wrap_generated_gteq_1.8.go | 436 +
 .../httpsnoop/wrap_generated_lt_1.8.go | 278 +
 .../github.com/go-logr/logr/.golangci.yaml | 3 -
 .../vendor/github.com/go-logr/logr/README.md | 130 +-
 .../github.com/go-logr/logr/SECURITY.md | 18 +
 .../vendor/github.com/go-logr/logr/context.go | 33 +
 .../github.com/go-logr/logr/context_noslog.go | 49 +
 .../github.com/go-logr/logr/context_slog.go | 83 +
 .../vendor/github.com/go-logr/logr/discard.go | 32 +-
 .../github.com/go-logr/logr/funcr/funcr.go | 911 +
 .../github.com/go-logr/logr/funcr/slogsink.go | 105 +
 .../vendor/github.com/go-logr/logr/logr.go | 243 +-
 .../github.com/go-logr/logr/sloghandler.go | 192 +
 .../vendor/github.com/go-logr/logr/slogr.go | 100 +
 .../github.com/go-logr/logr/slogsink.go | 120 +
 .../vendor/github.com/go-logr/stdr/LICENSE | 201 +
 .../vendor/github.com/go-logr/stdr/README.md | 6 +
 .../vendor/github.com/go-logr/stdr/stdr.go | 170 +
 .../vendor/github.com/golang/glog/glog.go | 1188 +-
 .../github.com/golang/glog/glog_file.go | 311 +-
 .../github.com/golang/glog/glog_file_linux.go | 39 +
 .../github.com/golang/glog/glog_file_other.go | 30 +
 .../github.com/golang/glog/glog_file_posix.go | 53 +
 .../github.com/golang/glog/glog_flags.go | 398 +
 .../golang/glog/internal/logsink/logsink.go | 387 +
 .../glog/internal/logsink/logsink_fatal.go | 35 +
 .../glog/internal/stackdump/stackdump.go | 127 +
 .../golang/protobuf/jsonpb/decode.go | 530 +
 .../golang/protobuf/jsonpb/encode.go | 559 +
 .../github.com/golang/protobuf/jsonpb/json.go | 69 +
 .../github.com/google/go-cmp/cmp/compare.go | 82 +-
 .../cmp/{export_unsafe.go => export.go} | 5 -
 .../google/go-cmp/cmp/export_panic.go | 16 -
 .../google/go-cmp/cmp/internal/diff/diff.go | 44 +-
 .../value/{pointer_unsafe.go => pointer.go} | 3 -
 .../cmp/internal/value/pointer_purego.go | 34 -
 .../google/go-cmp/cmp/internal/value/zero.go | 48 -
 .../github.com/google/go-cmp/cmp/options.go | 92 +-
 .../github.com/google/go-cmp/cmp/path.go | 66 +-
 .../google/go-cmp/cmp/report_compare.go | 13 +-
 .../google/go-cmp/cmp/report_reflect.go | 23 +-
 .../google/go-cmp/cmp/report_slices.go | 25 +-
 .../google/go-cmp/cmp/report_text.go | 1 +
 .../github.com/google/s2a-go/.gitignore | 6 +
 .../google/s2a-go/CODE_OF_CONDUCT.md | 93 +
 .../github.com/google/s2a-go/CONTRIBUTING.md | 29 +
 .../github.com/google/s2a-go/LICENSE.md | 202 +
 .../vendor/github.com/google/s2a-go/README.md | 14 +
 .../google/s2a-go/fallback/s2a_fallback.go | 167 +
 .../s2a-go/internal/authinfo/authinfo.go | 119 +
 .../s2a-go/internal/handshaker/handshaker.go | 438 +
 .../internal/handshaker/service/service.go | 66 +
 .../proto/common_go_proto/common.pb.go | 389 +
 .../s2a_context_go_proto/s2a_context.pb.go | 267 +
 .../internal/proto/s2a_go_proto/s2a.pb.go | 1377 +
 .../proto/s2a_go_proto/s2a_grpc.pb.go | 173 +
 .../proto/v2/common_go_proto/common.pb.go | 367 +
 .../v2/s2a_context_go_proto/s2a_context.pb.go | 248 +
 .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 2494 ++
 .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 159 +
 .../internal/aeadcrypter/aeadcrypter.go | 34 +
 .../record/internal/aeadcrypter/aesgcm.go | 70 +
 .../record/internal/aeadcrypter/chachapoly.go | 67 +
 .../record/internal/aeadcrypter/common.go | 92 +
 .../record/internal/halfconn/ciphersuite.go | 98 +
 .../record/internal/halfconn/counter.go | 60 +
 .../record/internal/halfconn/expander.go | 59 +
 .../record/internal/halfconn/halfconn.go | 193 +
 .../google/s2a-go/internal/record/record.go | 757 +
 .../s2a-go/internal/record/ticketsender.go | 178 +
 .../internal/tokenmanager/tokenmanager.go | 70 +
 .../google/s2a-go/internal/v2/README.md | 1 +
 .../internal/v2/certverifier/certverifier.go | 122 +
 .../testdata/client_intermediate_cert.der | Bin 0 -> 998 bytes
 .../testdata/client_leaf_cert.der | Bin 0 -> 1147 bytes
 .../testdata/client_root_cert.der | Bin 0 -> 1013 bytes
 .../testdata/server_intermediate_cert.der | Bin 0 -> 998 bytes
 .../testdata/server_leaf_cert.der | Bin 0 -> 1147 bytes
 .../testdata/server_root_cert.der | Bin 0 -> 1013 bytes
 .../internal/v2/remotesigner/remotesigner.go | 186 +
 .../v2/remotesigner/testdata/client_cert.der | Bin 0 -> 1013 bytes
 .../v2/remotesigner/testdata/client_cert.pem | 24 +
 .../v2/remotesigner/testdata/client_key.pem | 27 +
 .../v2/remotesigner/testdata/server_cert.der | Bin 0 -> 1013 bytes
 .../v2/remotesigner/testdata/server_cert.pem | 24 +
 .../v2/remotesigner/testdata/server_key.pem | 27 +
 .../google/s2a-go/internal/v2/s2av2.go | 391 +
 .../internal/v2/testdata/client_cert.pem | 24 +
 .../internal/v2/testdata/client_key.pem | 27 +
 .../internal/v2/testdata/server_cert.pem | 24 +
 .../internal/v2/testdata/server_key.pem | 27 +
 .../tlsconfigstore/testdata/client_cert.pem | 24 +
 .../v2/tlsconfigstore/testdata/client_key.pem | 27 +
 .../tlsconfigstore/testdata/server_cert.pem | 24 +
 .../v2/tlsconfigstore/testdata/server_key.pem | 27 +
 .../v2/tlsconfigstore/tlsconfigstore.go | 404 +
 .../github.com/google/s2a-go/retry/retry.go | 144 +
 .../vendor/github.com/google/s2a-go/s2a.go | 427 +
 .../github.com/google/s2a-go/s2a_options.go | 215 +
 .../github.com/google/s2a-go/s2a_utils.go | 79 +
 .../google/s2a-go/stream/s2a_stream.go | 34 +
 .../google/s2a-go/testdata/client_cert.pem | 24 +
 .../google/s2a-go/testdata/client_key.pem | 27 +
 .../s2a-go/testdata/mds_client_cert.pem | 19 +
 .../google/s2a-go/testdata/mds_client_key.pem | 28 +
 .../google/s2a-go/testdata/mds_root_cert.pem | 21 +
 .../s2a-go/testdata/mds_server_cert.pem | 21 +
 .../google/s2a-go/testdata/mds_server_key.pem | 28 +
 .../s2a-go/testdata/self_signed_cert.pem | 19 +
 .../s2a-go/testdata/self_signed_key.pem | 28 +
 .../google/s2a-go/testdata/server_cert.pem | 24 +
 .../google/s2a-go/testdata/server_key.pem | 27 +
 .../github.com/google/uuid/CHANGELOG.md | 41 +
 .../github.com/google/uuid/CONTRIBUTING.md | 26 +
 .../github.com/google/uuid/CONTRIBUTORS | 9 +
 .../vendor/github.com/google/uuid/LICENSE | 27 +
 .../vendor/github.com/google/uuid/README.md | 21 +
 .../vendor/github.com/google/uuid/dce.go | 80 +
 .../vendor/github.com/google/uuid/doc.go | 12 +
 .../vendor/github.com/google/uuid/hash.go | 59 +
 .../vendor/github.com/google/uuid/marshal.go | 38 +
 .../vendor/github.com/google/uuid/node.go | 90 +
 .../vendor/github.com/google/uuid/node_js.go | 12 +
 .../vendor/github.com/google/uuid/node_net.go | 33 +
 .../vendor/github.com/google/uuid/null.go | 118 +
 .../vendor/github.com/google/uuid/sql.go | 59 +
 .../vendor/github.com/google/uuid/time.go | 134 +
 .../vendor/github.com/google/uuid/util.go | 43 +
 .../vendor/github.com/google/uuid/uuid.go | 365 +
 .../vendor/github.com/google/uuid/version1.go | 44 +
 .../vendor/github.com/google/uuid/version4.go | 76 +
 .../vendor/github.com/google/uuid/version6.go | 56 +
 .../vendor/github.com/google/uuid/version7.go | 104 +
 .../enterprise-certificate-proxy/LICENSE | 202 +
 .../client/client.go | 219 +
 .../client/util/util.go | 100 +
 .../gax-go/v2/.release-please-manifest.json | 3 +
 .../googleapis/gax-go/v2/CHANGES.md | 114 +
 .../googleapis/gax-go/v2/apierror/apierror.go | 101 +-
 .../internal/proto/custom_error.pb.go | 256 +
 .../internal/proto/custom_error.proto | 50 +
 .../v2/apierror/internal/proto/error.pb.go | 110 +-
 .../googleapis/gax-go/v2/call_option.go | 76 +
 .../googleapis/gax-go/v2/callctx/callctx.go | 82 +
 .../googleapis/gax-go/v2/content_type.go | 112 +
 .../github.com/googleapis/gax-go/v2/gax.go | 4 +-
 .../github.com/googleapis/gax-go/v2/header.go | 124 +-
 .../googleapis/gax-go/v2/internal/version.go | 33 +
 .../github.com/googleapis/gax-go/v2/invoke.go | 10 +
 .../googleapis/gax-go/v2/proto_json_stream.go | 126 +
 .../gax-go/v2/release-please-config.json | 10 +
 .../vendor/go.opencensus.io/Makefile | 8 +-
 .../vendor/go.opencensus.io/opencensus.go | 2 +-
 .../go.opencensus.io/plugin/ochttp/server.go | 8 +-
 .../vendor/go.opencensus.io/stats/doc.go | 7 +-
 .../go.opencensus.io/stats/internal/record.go | 6 +
 .../vendor/go.opencensus.io/stats/record.go | 21 +-
 .../stats/view/aggregation.go | 6 +-
 .../go.opencensus.io/stats/view/collector.go | 9 +-
 .../vendor/go.opencensus.io/stats/view/doc.go | 2 +-
 .../go.opencensus.io/stats/view/worker.go | 27 +-
 .../vendor/go.opencensus.io/tag/profile_19.go | 1 +
 .../go.opencensus.io/tag/profile_not19.go | 1 +
 .../vendor/go.opencensus.io/trace/doc.go | 13 +-
 .../vendor/go.opencensus.io/trace/lrumap.go | 2 +-
 .../go.opencensus.io/trace/trace_go11.go | 1 +
 .../go.opencensus.io/trace/trace_nongo11.go | 1 +
 .../instrumentation/net/http/otelhttp/LICENSE | 201 +
 .../net/http/otelhttp/client.go | 61 +
 .../net/http/otelhttp/common.go | 60 +
 .../net/http/otelhttp/config.go | 207 +
 .../instrumentation/net/http/otelhttp/doc.go | 18 +
 .../net/http/otelhttp/handler.go | 283 +
 .../http/otelhttp/internal/semconvutil/gen.go | 21 +
 .../otelhttp/internal/semconvutil/httpconv.go | 587 +
 .../otelhttp/internal/semconvutil/netconv.go | 215 +
 .../net/http/otelhttp/labeler.go | 65 +
 .../net/http/otelhttp/transport.go | 286 +
 .../net/http/otelhttp/version.go | 28 +
 .../instrumentation/net/http/otelhttp/wrap.go | 99 +
 .../go.opentelemetry.io/otel/.codespellignore | 5 +
 .../go.opentelemetry.io/otel/.codespellrc | 10 +
 .../go.opentelemetry.io/otel/.gitattributes | 3 +
 .../go.opentelemetry.io/otel/.gitignore | 22 +
 .../go.opentelemetry.io/otel/.gitmodules | 3 +
 .../go.opentelemetry.io/otel/.golangci.yml | 296 +
 .../go.opentelemetry.io/otel/.lycheeignore | 6 +
 .../otel/.markdownlint.yaml | 29 +
 .../go.opentelemetry.io/otel/CHANGELOG.md | 2909 ++
 .../go.opentelemetry.io/otel/CODEOWNERS | 17 +
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 645 +
 .../vendor/go.opentelemetry.io/otel/LICENSE | 201 +
 .../vendor/go.opentelemetry.io/otel/Makefile | 318 +
 .../vendor/go.opentelemetry.io/otel/README.md | 108 +
 .../go.opentelemetry.io/otel/RELEASING.md | 139 +
 .../go.opentelemetry.io/otel/VERSIONING.md | 224 +
 .../go.opentelemetry.io/otel/attribute/doc.go | 16 +
 .../otel/attribute/encoder.go | 146 +
 .../otel/attribute/filter.go | 60 +
 .../otel/attribute/iterator.go | 161 +
 .../go.opentelemetry.io/otel/attribute/key.go | 134 +
 .../go.opentelemetry.io/otel/attribute/kv.go | 86 +
 .../go.opentelemetry.io/otel/attribute/set.go | 452 +
 .../otel/attribute/type_string.go | 31 +
 .../otel/attribute/value.go | 270 +
 .../otel/baggage/baggage.go | 744 +
 .../otel/baggage/context.go | 39 +
 .../go.opentelemetry.io/otel/baggage/doc.go | 20 +
 .../go.opentelemetry.io/otel/codes/codes.go | 116 +
 .../go.opentelemetry.io/otel/codes/doc.go | 21 +
 .../vendor/go.opentelemetry.io/otel/doc.go | 34 +
 .../go.opentelemetry.io/otel/error_handler.go | 38 +
 .../go.opentelemetry.io/otel/get_main_pkgs.sh | 41 +
 .../go.opentelemetry.io/otel/handler.go | 48 +
 .../otel/internal/attribute/attribute.go | 111 +
 .../otel/internal/baggage/baggage.go | 43 +
 .../otel/internal/baggage/context.go | 92 +
 .../go.opentelemetry.io/otel/internal/gen.go | 29 +
 .../otel/internal/global/handler.go | 102 +
 .../otel/internal/global/instruments.go | 371 +
 .../otel/internal/global/internal_logging.go | 69 +
 .../otel/internal/global/meter.go | 354 +
 .../otel/internal/global/propagator.go | 82 +
 .../otel/internal/global/state.go | 156 +
 .../otel/internal/global/trace.go | 199 +
 .../otel/internal/rawhelpers.go | 55 +
 .../otel/internal_logging.go | 26 +
 .../vendor/go.opentelemetry.io/otel/metric.go | 53 +
 .../go.opentelemetry.io/otel/metric/LICENSE | 201 +
 .../otel/metric/asyncfloat64.go | 271 +
 .../otel/metric/asyncint64.go | 269 +
 .../go.opentelemetry.io/otel/metric/config.go | 92 +
 .../go.opentelemetry.io/otel/metric/doc.go | 170 +
 .../otel/metric/embedded/embedded.go | 234 +
 .../otel/metric/instrument.go | 357 +
 .../go.opentelemetry.io/otel/metric/meter.go | 212 +
 .../otel/metric/syncfloat64.go | 185 +
 .../otel/metric/syncint64.go | 185 +
 .../go.opentelemetry.io/otel/propagation.go | 31 +
 .../otel/propagation/baggage.go | 58 +
 .../otel/propagation/doc.go | 24 +
 .../otel/propagation/propagation.go | 153 +
 .../otel/propagation/trace_context.go | 167 +
 .../go.opentelemetry.io/otel/requirements.txt | 1 +
 .../otel/semconv/v1.20.0/attribute_group.go | 1209 +
 .../otel/semconv/v1.20.0/doc.go | 20 +
 .../otel/semconv/v1.20.0/event.go | 199 +
 .../otel/semconv/v1.20.0/exception.go | 20 +
 .../otel/semconv/v1.20.0/http.go | 21 +
 .../otel/semconv/v1.20.0/resource.go | 2071 ++
 .../otel/semconv/v1.20.0/schema.go | 20 +
 .../otel/semconv/v1.20.0/trace.go | 2610 ++
 .../vendor/go.opentelemetry.io/otel/trace.go | 47 +
 .../go.opentelemetry.io/otel/trace/LICENSE | 201 +
 .../go.opentelemetry.io/otel/trace/config.go | 334 +
 .../go.opentelemetry.io/otel/trace/context.go | 61 +
 .../go.opentelemetry.io/otel/trace/doc.go | 130 +
 .../otel/trace/embedded/embedded.go | 56 +
 .../otel/trace/nonrecording.go | 27 +
 .../go.opentelemetry.io/otel/trace/noop.go | 93 +
 .../go.opentelemetry.io/otel/trace/trace.go | 577 +
 .../otel/trace/tracestate.go | 331 +
 .../otel/verify_examples.sh | 85 +
 .../go.opentelemetry.io/otel/version.go | 20 +
 .../go.opentelemetry.io/otel/versions.yaml | 52 +
 .../vendor/golang.org/x/crypto/LICENSE | 27 +
 .../vendor/golang.org/x/crypto/PATENTS | 22 +
 .../x/crypto/chacha20/chacha_arm64.go | 16 +
 .../x/crypto/chacha20/chacha_arm64.s | 307 +
 .../x/crypto/chacha20/chacha_generic.go | 398 +
 .../x/crypto/chacha20/chacha_noasm.go | 13 +
 .../x/crypto/chacha20/chacha_ppc64le.go | 16 +
 .../x/crypto/chacha20/chacha_ppc64le.s | 449 +
 .../x/crypto/chacha20/chacha_s390x.go | 27 +
 .../x/crypto/chacha20/chacha_s390x.s | 224 +
 .../golang.org/x/crypto/chacha20/xor.go | 42 +
 .../chacha20poly1305/chacha20poly1305.go | 98 +
 .../chacha20poly1305_amd64.go | 86 +
 .../chacha20poly1305/chacha20poly1305_amd64.s | 2715 ++
 .../chacha20poly1305_generic.go | 81 +
 .../chacha20poly1305_noasm.go | 15 +
 .../chacha20poly1305/xchacha20poly1305.go | 86 +
 .../golang.org/x/crypto/cryptobyte/asn1.go | 825 +
 .../x/crypto/cryptobyte/asn1/asn1.go | 46 +
 .../golang.org/x/crypto/cryptobyte/builder.go | 350 +
 .../golang.org/x/crypto/cryptobyte/string.go | 183 +
 .../vendor/golang.org/x/crypto/hkdf/hkdf.go | 95 +
 .../x/crypto/internal/alias/alias.go | 31 +
 .../x/crypto/internal/alias/alias_purego.go | 34 +
 .../x/crypto/internal/poly1305/mac_noasm.go | 9 +
 .../x/crypto/internal/poly1305/poly1305.go | 99 +
 .../x/crypto/internal/poly1305/sum_amd64.go | 47 +
 .../x/crypto/internal/poly1305/sum_amd64.s | 108 +
 .../x/crypto/internal/poly1305/sum_generic.go | 312 +
 .../x/crypto/internal/poly1305/sum_ppc64le.go | 47 +
 .../x/crypto/internal/poly1305/sum_ppc64le.s | 181 +
 .../x/crypto/internal/poly1305/sum_s390x.go | 76 +
 .../x/crypto/internal/poly1305/sum_s390x.s | 503 +
 .../x/net/context/ctxhttp/ctxhttp.go | 71 -
 .../vendor/golang.org/x/net/context/go17.go | 1 -
 .../vendor/golang.org/x/net/context/go19.go | 1 -
 .../golang.org/x/net/context/pre_go17.go | 1 -
 .../golang.org/x/net/context/pre_go19.go | 1 -
 .../golang.org/x/net/http2/databuffer.go | 59 +-
 .../vendor/golang.org/x/net/http2/frame.go | 11 +-
 .../vendor/golang.org/x/net/http2/go111.go | 30 -
 .../vendor/golang.org/x/net/http2/go115.go | 27 -
 .../vendor/golang.org/x/net/http2/go118.go | 17 -
 .../golang.org/x/net/http2/not_go111.go | 21 -
 .../golang.org/x/net/http2/not_go115.go | 31 -
 .../golang.org/x/net/http2/not_go118.go | 17 -
 .../vendor/golang.org/x/net/http2/server.go | 24 +-
 .../golang.org/x/net/http2/transport.go | 33 +-
 .../vendor/golang.org/x/net/idna/go118.go | 1 -
 .../golang.org/x/net/idna/idna10.0.0.go | 1 -
 .../vendor/golang.org/x/net/idna/idna9.0.0.go | 1 -
 .../vendor/golang.org/x/net/idna/pre_go118.go | 1 -
 .../golang.org/x/net/idna/tables10.0.0.go | 1 -
 .../golang.org/x/net/idna/tables11.0.0.go | 1 -
 .../golang.org/x/net/idna/tables12.0.0.go | 1 -
 .../golang.org/x/net/idna/tables13.0.0.go | 1 -
 .../golang.org/x/net/idna/tables15.0.0.go | 1 -
 .../golang.org/x/net/idna/tables9.0.0.go | 1 -
 .../golang.org/x/net/idna/trie12.0.0.go | 1 -
 .../golang.org/x/net/idna/trie13.0.0.go | 1 -
 .../vendor/golang.org/x/time/AUTHORS | 3 -
 .../vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 -
 .../vendor/golang.org/x/oauth2/README.md | 12 +-
 .../x/oauth2/authhandler/authhandler.go | 44 +-
 .../vendor/golang.org/x/oauth2/deviceauth.go | 198 +
 .../x/oauth2/google/appengine_gen1.go | 1 -
 .../x/oauth2/google/appengine_gen2_flex.go | 1 -
 .../golang.org/x/oauth2/google/default.go | 174 +-
 .../vendor/golang.org/x/oauth2/google/doc.go | 80 +-
 .../golang.org/x/oauth2/google/error.go | 64 +
 .../golang.org/x/oauth2/google/google.go | 80 +-
 .../google/internal/externalaccount/aws.go | 126 +-
 .../externalaccount/basecredentials.go | 108 +-
 .../externalaccount/executablecredsource.go | 313 +
 .../externalaccount/filecredsource.go | 4 +
 .../google/internal/externalaccount/header.go | 64 +
 .../internal/externalaccount/impersonate.go | 9 +-
 .../internal/externalaccount/urlcredsource.go | 4 +
 .../externalaccountauthorizeduser.go | 114 +
 .../clientauth.go | 8 +-
 .../sts_exchange.go | 42 +-
 .../vendor/golang.org/x/oauth2/google/jwt.go | 3 +-
 .../x/oauth2/internal/client_appengine.go | 1 -
 .../golang.org/x/oauth2/internal/oauth2.go | 2 +-
 .../golang.org/x/oauth2/internal/token.go | 134 +-
 .../vendor/golang.org/x/oauth2/jws/jws.go | 2 +-
 .../vendor/golang.org/x/oauth2/oauth2.go | 64 +-
 .../vendor/golang.org/x/oauth2/pkce.go | 68 +
 .../vendor/golang.org/x/oauth2/token.go | 35 +-
 .../golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 +
 .../vendor/golang.org/x/sys/cpu/byteorder.go | 66 +
 .../vendor/golang.org/x/sys/cpu/cpu.go | 290 +
 .../vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 +
 .../vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 +
 .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 +
 .../vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 +
 .../golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 +
 .../golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 +
 .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 +
 .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 +
 .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 +
 .../golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 +
 .../golang.org/x/sys/cpu/cpu_gccgo_x86.go | 31 +
 .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 +
 .../golang.org/x/sys/cpu/cpu_linux_arm.go | 39 +
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 +
 .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 +
 .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 +
 .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 +
 .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 +
 .../golang.org/x/sys/cpu/cpu_loong64.go | 12 +
 .../golang.org/x/sys/cpu/cpu_mips64x.go | 15 +
 .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 +
 .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 +
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 +
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 +
 .../golang.org/x/sys/cpu/cpu_other_arm.go | 9 +
 .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 +
 .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 +
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 +
 .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 +
 .../vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 +
 .../golang.org/x/sys/cpu/cpu_riscv64.go | 11 +
 .../vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 +
 .../vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 +
 .../vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 +
 .../vendor/golang.org/x/sys/cpu/cpu_x86.go | 151 +
 .../vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 +
 .../vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 +
 .../golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 +
 .../vendor/golang.org/x/sys/cpu/endian_big.go | 10 +
 .../golang.org/x/sys/cpu/endian_little.go | 10 +
 .../golang.org/x/sys/cpu/hwcap_linux.go | 71 +
 .../vendor/golang.org/x/sys/cpu/parse.go | 43 +
 .../x/sys/cpu/proc_cpuinfo_linux.go | 53 +
 .../golang.org/x/sys/cpu/runtime_auxv.go | 16 +
 .../x/sys/cpu/runtime_auxv_go121.go | 18 +
 .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 +
 .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 +
 .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 -
 .../golang.org/x/sys/plan9/pwd_plan9.go | 1 -
 .../vendor/golang.org/x/sys/plan9/race.go | 1 -
 .../vendor/golang.org/x/sys/plan9/race0.go | 1 -
 .../vendor/golang.org/x/sys/plan9/str.go | 1 -
 .../vendor/golang.org/x/sys/plan9/syscall.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_386.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_arm.go | 1 -
 .../vendor/golang.org/x/sys/unix/aliases.go | 2 -
 .../golang.org/x/sys/unix/asm_aix_ppc64.s | 1 -
 .../golang.org/x/sys/unix/asm_bsd_386.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_amd64.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_arm.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_arm64.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 -
 .../golang.org/x/sys/unix/asm_linux_386.s | 1 -
 .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 -
 .../golang.org/x/sys/unix/asm_linux_arm.s | 1 -
 .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 -
 .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 -
 .../x/sys/unix/asm_openbsd_mips64.s | 1 -
 .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 -
 .../golang.org/x/sys/unix/asm_zos_s390x.s | 3 -
 .../golang.org/x/sys/unix/cap_freebsd.go | 1 -
 .../vendor/golang.org/x/sys/unix/constants.go | 1 -
 .../golang.org/x/sys/unix/dev_aix_ppc.go | 1 -
 .../golang.org/x/sys/unix/dev_aix_ppc64.go | 1 -
 .../vendor/golang.org/x/sys/unix/dev_zos.go | 1 -
 .../vendor/golang.org/x/sys/unix/dirent.go | 1 -
 .../golang.org/x/sys/unix/endian_big.go | 1 -
 .../golang.org/x/sys/unix/endian_little.go | 1 -
 .../vendor/golang.org/x/sys/unix/env_unix.go | 1 -
 .../vendor/golang.org/x/sys/unix/epoll_zos.go | 1 -
 .../vendor/golang.org/x/sys/unix/fcntl.go | 3 +-
 .../x/sys/unix/fcntl_linux_32bit.go | 1 -
 .../vendor/golang.org/x/sys/unix/fdset.go | 1 -
 .../golang.org/x/sys/unix/fstatfs_zos.go | 1 -
 .../vendor/golang.org/x/sys/unix/gccgo.go | 1 -
 .../vendor/golang.org/x/sys/unix/gccgo_c.c | 1 -
 .../x/sys/unix/gccgo_linux_amd64.go | 1 -
 .../golang.org/x/sys/unix/ifreq_linux.go | 1 -
 .../golang.org/x/sys/unix/ioctl_linux.go | 5 +
 .../golang.org/x/sys/unix/ioctl_signed.go | 1 -
 .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 -
 .../vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 -
 .../vendor/golang.org/x/sys/unix/mkerrors.sh | 43 +-
 .../golang.org/x/sys/unix/mmap_nomremap.go | 1 -
 .../vendor/golang.org/x/sys/unix/mremap.go | 1 -
 .../golang.org/x/sys/unix/pagesize_unix.go | 1 -
 .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +-
 .../golang.org/x/sys/unix/ptrace_darwin.go | 1 -
 .../golang.org/x/sys/unix/ptrace_ios.go | 1 -
 .../vendor/golang.org/x/sys/unix/race.go | 1 -
 .../vendor/golang.org/x/sys/unix/race0.go | 1 -
 .../x/sys/unix/readdirent_getdents.go | 1 -
 .../x/sys/unix/readdirent_getdirentries.go | 1 -
 .../golang.org/x/sys/unix/sockcmsg_unix.go | 1 -
 .../x/sys/unix/sockcmsg_unix_other.go | 1 -
 .../vendor/golang.org/x/sys/unix/syscall.go | 1 -
 .../golang.org/x/sys/unix/syscall_aix.go | 4 +-
 .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 -
 .../x/sys/unix/syscall_aix_ppc64.go | 1 -
 .../golang.org/x/sys/unix/syscall_bsd.go | 3 +-
 .../x/sys/unix/syscall_darwin_amd64.go | 1 -
 .../x/sys/unix/syscall_darwin_arm64.go | 1 -
 .../x/sys/unix/syscall_darwin_libSystem.go | 1 -
 .../x/sys/unix/syscall_dragonfly_amd64.go | 1 -
 .../x/sys/unix/syscall_freebsd_386.go | 1 -
 .../x/sys/unix/syscall_freebsd_amd64.go | 1 -
 .../x/sys/unix/syscall_freebsd_arm.go | 1 -
 .../x/sys/unix/syscall_freebsd_arm64.go | 1 -
 .../x/sys/unix/syscall_freebsd_riscv64.go | 1 -
 .../golang.org/x/sys/unix/syscall_hurd.go | 1 -
 .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 -
 .../golang.org/x/sys/unix/syscall_illumos.go | 1 -
 .../golang.org/x/sys/unix/syscall_linux.go | 33 +-
 .../x/sys/unix/syscall_linux_386.go | 1 -
 .../x/sys/unix/syscall_linux_alarm.go | 2 -
 .../x/sys/unix/syscall_linux_amd64.go | 1 -
 .../x/sys/unix/syscall_linux_amd64_gc.go | 1 -
 .../x/sys/unix/syscall_linux_arm.go | 1 -
 .../x/sys/unix/syscall_linux_arm64.go | 1 -
 .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 -
 .../x/sys/unix/syscall_linux_gc_386.go | 1 -
 .../x/sys/unix/syscall_linux_gc_arm.go | 1 -
 .../x/sys/unix/syscall_linux_gccgo_386.go | 1 -
 .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 -
 .../x/sys/unix/syscall_linux_loong64.go | 1 -
 .../x/sys/unix/syscall_linux_mips64x.go | 2 -
 .../x/sys/unix/syscall_linux_mipsx.go | 2 -
 .../x/sys/unix/syscall_linux_ppc.go | 1 -
 .../x/sys/unix/syscall_linux_ppc64x.go | 2 -
 .../x/sys/unix/syscall_linux_riscv64.go | 1 -
 .../x/sys/unix/syscall_linux_s390x.go | 1 -
 .../x/sys/unix/syscall_linux_sparc64.go | 1 -
 .../x/sys/unix/syscall_netbsd_386.go | 1 -
 .../x/sys/unix/syscall_netbsd_amd64.go | 1 -
 .../x/sys/unix/syscall_netbsd_arm.go | 1 -
 .../x/sys/unix/syscall_netbsd_arm64.go | 1 -
 .../golang.org/x/sys/unix/syscall_openbsd.go | 28 +-
 .../x/sys/unix/syscall_openbsd_386.go | 1 -
 .../x/sys/unix/syscall_openbsd_amd64.go | 1 -
 .../x/sys/unix/syscall_openbsd_arm.go | 1 -
 .../x/sys/unix/syscall_openbsd_arm64.go | 1 -
 .../x/sys/unix/syscall_openbsd_libc.go | 1 -
 .../x/sys/unix/syscall_openbsd_ppc64.go | 1 -
 .../x/sys/unix/syscall_openbsd_riscv64.go | 1 -
 .../golang.org/x/sys/unix/syscall_solaris.go | 5 +-
 .../x/sys/unix/syscall_solaris_amd64.go | 1 -
 .../golang.org/x/sys/unix/syscall_unix.go | 1 -
 .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 -
 .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 -
 .../x/sys/unix/syscall_zos_s390x.go | 3 +-
 .../golang.org/x/sys/unix/sysvshm_linux.go | 1 -
 .../golang.org/x/sys/unix/sysvshm_unix.go | 1 -
 .../x/sys/unix/sysvshm_unix_other.go | 1 -
 .../golang.org/x/sys/unix/timestruct.go | 1 -
 .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +-
 .../vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 -
 .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 -
 .../x/sys/unix/zerrors_aix_ppc64.go | 1 -
 .../x/sys/unix/zerrors_darwin_amd64.go | 1 -
 .../x/sys/unix/zerrors_darwin_arm64.go | 1 -
 .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_386.go | 1 -
 .../x/sys/unix/zerrors_freebsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_arm.go | 1 -
 .../x/sys/unix/zerrors_freebsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 -
 .../golang.org/x/sys/unix/zerrors_linux.go | 104 +-
 .../x/sys/unix/zerrors_linux_386.go | 4 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 4 +-
 .../x/sys/unix/zerrors_linux_arm.go | 4 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 4 +-
 .../x/sys/unix/zerrors_linux_loong64.go | 5 +-
 .../x/sys/unix/zerrors_linux_mips.go | 4 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 4 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 4 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 4 +-
 .../x/sys/unix/zerrors_linux_ppc.go | 4 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 4 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 7 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 4 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 4 +-
 .../x/sys/unix/zerrors_netbsd_386.go | 1 -
 .../x/sys/unix/zerrors_netbsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_netbsd_arm.go | 1 -
 .../x/sys/unix/zerrors_netbsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_386.go | 1 -
 .../x/sys/unix/zerrors_openbsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_arm.go | 1 -
 .../x/sys/unix/zerrors_openbsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_mips64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zerrors_solaris_amd64.go | 1 -
 .../x/sys/unix/zerrors_zos_s390x.go | 1 -
 .../x/sys/unix/zptrace_armnn_linux.go | 2 -
 .../x/sys/unix/zptrace_mipsnn_linux.go | 2 -
 .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 -
 .../x/sys/unix/zptrace_x86_linux.go | 2 -
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 -
 .../x/sys/unix/zsyscall_aix_ppc64.go | 1 -
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 -
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 -
 .../x/sys/unix/zsyscall_darwin_amd64.go | 1 -
 .../x/sys/unix/zsyscall_darwin_arm64.go | 1 -
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 -
 .../x/sys/unix/zsyscall_freebsd_386.go | 1 -
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 -
 .../x/sys/unix/zsyscall_freebsd_arm.go | 1 -
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 -
 .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 -
 .../x/sys/unix/zsyscall_illumos_amd64.go | 1 -
 .../golang.org/x/sys/unix/zsyscall_linux.go | 26 +-
 .../x/sys/unix/zsyscall_linux_386.go | 1 -
 .../x/sys/unix/zsyscall_linux_amd64.go | 1 -
 .../x/sys/unix/zsyscall_linux_arm.go | 1 -
 .../x/sys/unix/zsyscall_linux_arm64.go | 1 -
 .../x/sys/unix/zsyscall_linux_loong64.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips64.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips64le.go | 1 -
 .../x/sys/unix/zsyscall_linux_mipsle.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc64.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 -
 .../x/sys/unix/zsyscall_linux_riscv64.go | 1 -
 .../x/sys/unix/zsyscall_linux_s390x.go | 1 -
 .../x/sys/unix/zsyscall_linux_sparc64.go | 1 -
 .../x/sys/unix/zsyscall_netbsd_386.go | 1 -
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 -
 .../x/sys/unix/zsyscall_netbsd_arm.go | 1 -
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 -
 .../x/sys/unix/zsyscall_openbsd_386.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_386.s | 20 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_amd64.s | 20 +
 .../x/sys/unix/zsyscall_openbsd_arm.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_arm.s | 20 +
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_arm64.s | 20 +
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.s | 20 +
 .../x/sys/unix/zsyscall_openbsd_ppc64.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_ppc64.s | 24 +
 .../x/sys/unix/zsyscall_openbsd_riscv64.go | 70 +-
 .../x/sys/unix/zsyscall_openbsd_riscv64.s | 20 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 1 -
 .../x/sys/unix/zsyscall_zos_s390x.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_386.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_arm.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_darwin_amd64.go | 1 -
 .../x/sys/unix/zsysnum_darwin_arm64.go | 1 -
 .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_386.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_linux_386.go | 6 +-
 .../x/sys/unix/zsysnum_linux_amd64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_arm.go | 6 +-
 .../x/sys/unix/zsysnum_linux_arm64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_loong64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_mips.go | 6 +-
 .../x/sys/unix/zsysnum_linux_mips64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_mips64le.go | 6 +-
 .../x/sys/unix/zsysnum_linux_mipsle.go | 6 +-
 .../x/sys/unix/zsysnum_linux_ppc.go | 6 +-
 .../x/sys/unix/zsysnum_linux_ppc64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 6 +-
 .../x/sys/unix/zsysnum_linux_riscv64.go | 6 +-
 .../x/sys/unix/zsysnum_linux_s390x.go | 6 +-
 .../x/sys/unix/zsysnum_linux_sparc64.go | 6 +-
 .../x/sys/unix/zsysnum_netbsd_386.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_386.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_zos_s390x.go | 1 -
 .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 -
 .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 -
 .../x/sys/unix/ztypes_darwin_amd64.go | 1 -
 .../x/sys/unix/ztypes_darwin_arm64.go | 1 -
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_386.go | 1 -
 .../x/sys/unix/ztypes_freebsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_arm.go | 1 -
 .../x/sys/unix/ztypes_freebsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 -
 .../golang.org/x/sys/unix/ztypes_linux.go | 170 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 -
 .../x/sys/unix/ztypes_linux_amd64.go | 1 -
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 -
 .../x/sys/unix/ztypes_linux_arm64.go | 1 -
 .../x/sys/unix/ztypes_linux_loong64.go | 1 -
 .../x/sys/unix/ztypes_linux_mips.go | 1 -
 .../x/sys/unix/ztypes_linux_mips64.go | 1 -
 .../x/sys/unix/ztypes_linux_mips64le.go | 1 -
 .../x/sys/unix/ztypes_linux_mipsle.go | 1 -
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 1 -
 .../x/sys/unix/ztypes_linux_ppc64.go | 1 -
 .../x/sys/unix/ztypes_linux_ppc64le.go | 1 -
 .../x/sys/unix/ztypes_linux_riscv64.go | 1 -
 .../x/sys/unix/ztypes_linux_s390x.go | 1 -
 .../x/sys/unix/ztypes_linux_sparc64.go | 1 -
 .../x/sys/unix/ztypes_netbsd_386.go | 1 -
 .../x/sys/unix/ztypes_netbsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_netbsd_arm.go | 1 -
 .../x/sys/unix/ztypes_netbsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_386.go | 1 -
 .../x/sys/unix/ztypes_openbsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_arm.go | 1 -
 .../x/sys/unix/ztypes_openbsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_mips64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 -
 .../x/sys/unix/ztypes_solaris_amd64.go | 1 -
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 1 -
 .../golang.org/x/sys/windows/aliases.go | 1 -
 .../vendor/golang.org/x/sys/windows/empty.s | 1 -
 .../golang.org/x/sys/windows/env_windows.go | 17 +-
 .../golang.org/x/sys/windows/eventlog.go | 1 -
 .../golang.org/x/sys/windows/mksyscall.go | 1 -
 .../vendor/golang.org/x/sys/windows/race.go | 1 -
 .../vendor/golang.org/x/sys/windows/race0.go | 1 -
 .../golang.org/x/sys/windows/service.go | 1 -
 .../vendor/golang.org/x/sys/windows/str.go | 1 -
 .../golang.org/x/sys/windows/syscall.go | 1 -
 .../x/sys/windows/syscall_windows.go | 10 +-
 .../golang.org/x/sys/windows/types_windows.go | 28 +-
 .../x/sys/windows/zsyscall_windows.go | 37 +
 .../vendor/golang.org/x/term/term_unix.go | 1 -
 .../vendor/golang.org/x/term/term_unix_bsd.go | 1 -
 .../golang.org/x/term/term_unix_other.go | 1 -
 .../golang.org/x/term/term_unsupported.go | 1 -
 .../x/text/secure/bidirule/bidirule10.0.0.go | 1 -
 .../x/text/secure/bidirule/bidirule9.0.0.go | 1 -
 .../x/text/unicode/bidi/tables10.0.0.go | 1 -
 .../x/text/unicode/bidi/tables11.0.0.go | 1 -
 .../x/text/unicode/bidi/tables12.0.0.go | 1 -
 .../x/text/unicode/bidi/tables13.0.0.go | 1 -
 .../x/text/unicode/bidi/tables15.0.0.go | 1 -
 .../x/text/unicode/bidi/tables9.0.0.go | 1 -
 .../x/text/unicode/norm/tables10.0.0.go | 1 -
 .../x/text/unicode/norm/tables11.0.0.go | 1 -
 .../x/text/unicode/norm/tables12.0.0.go | 1 -
 .../x/text/unicode/norm/tables13.0.0.go | 1 -
 .../x/text/unicode/norm/tables15.0.0.go | 1 -
 .../x/text/unicode/norm/tables9.0.0.go | 1 -
 .../vendor/golang.org/x/time/AUTHORS | 3 -
 .../vendor/golang.org/x/time/CONTRIBUTORS | 3 -
 .../vendor/golang.org/x/time/rate/rate.go | 125 +-
 .../golang.org/x/time/rate/sometimes.go | 67 +
 .../api/googleapi/googleapi.go | 26 +-
 .../api/googleapi/transport/apikey.go | 2 +-
 .../google.golang.org/api/internal/cba.go | 318 +
 .../api/internal/cert/default_cert.go | 58 +
 .../api/internal/cert/enterprise_cert.go | 54 +
 .../cert/secureconnect_cert.go} | 81 +-
 .../google.golang.org/api/internal/creds.go | 129 +-
 .../api/internal/gensupport/error.go | 24 +
 .../api/internal/gensupport/json.go | 33 +-
 .../api/internal/gensupport/media.go | 129 +-
 .../api/internal/gensupport/params.go | 2 +-
 .../api/internal/gensupport/resumable.go | 38 +-
 .../api/internal/gensupport/retry.go | 11 +
 .../api/internal/gensupport/send.go | 77 +-
 .../api/internal/impersonate/impersonate.go | 3 +-
 .../google.golang.org/api/internal/s2a.go | 136 +
 .../api/internal/settings.go | 86 +
 .../google.golang.org/api/internal/version.go | 2 +-
 .../api/logging/v2/logging-api.json | 3248 +-
 .../api/logging/v2/logging-gen.go | 27866 +++++++++++-----
 .../api/option/credentials_go19.go | 24 -
 .../api/option/credentials_notgo19.go | 23 -
 .../option/internaloption/internaloption.go | 78 +
 .../google.golang.org/api/option/option.go | 46 +-
 .../transport/http/configure_http2_go116.go | 26 -
 .../http/configure_http2_not_go116.go | 17 -
 .../transport/http/default_transport_go113.go | 21 -
 .../http/default_transport_not_go113.go | 16 -
 .../api/transport/http/dial.go | 83 +-
 .../api/transport/http/dial_appengine.go | 21 -
 .../api/transport/internal/dca/dca.go | 143 -
 .../google.golang.org/appengine/.travis.yml | 18 -
 .../appengine/CONTRIBUTING.md | 6 +-
 .../google.golang.org/appengine/README.md | 6 +-
 .../google.golang.org/appengine/appengine.go | 23 +-
 .../appengine/appengine_vm.go | 12 +-
 .../google.golang.org/appengine/identity.go | 3 +-
 .../appengine/internal/api.go | 347 +-
 .../appengine/internal/api_classic.go | 29 +-
 .../appengine/internal/api_common.go | 50 +-
 .../appengine/internal/identity.go | 7 +-
 .../appengine/internal/identity_classic.go | 23 +-
 .../appengine/internal/identity_flex.go | 1 +
 .../appengine/internal/identity_vm.go | 20 +-
 .../appengine/internal/main.go | 1 +
 .../appengine/internal/main_vm.go | 3 +-
 .../appengine/internal/transaction.go | 10 +-
 .../google.golang.org/appengine/namespace.go | 3 +-
 .../google.golang.org/appengine/timeout.go | 2 +-
 .../appengine/travis_install.sh | 18 -
 .../appengine/travis_test.sh | 12 -
 .../appengine/urlfetch/urlfetch.go | 9 +-
 .../genproto/googleapis/rpc/LICENSE | 202 +
 .../genproto/googleapis/rpc/code/code.pb.go | 28 +-
 .../rpc/errdetails/error_details.pb.go | 374 +-
 .../googleapis/rpc/status/status.pb.go | 10 +-
 .../google.golang.org/grpc/CONTRIBUTING.md | 32 +-
 .../vendor/google.golang.org/grpc/README.md | 60 +-
 .../grpc/attributes/attributes.go | 74 +-
 .../vendor/google.golang.org/grpc/backoff.go | 2 +-
 .../grpc/balancer/balancer.go | 133 +-
 .../grpc/balancer/base/balancer.go | 30 +-
 .../grpc/balancer/conn_state_evaluator.go | 74 +
 .../grpc/balancer/roundrobin/roundrobin.go | 16 +-
 .../grpc/balancer_conn_wrappers.go | 292 -
 .../grpc/balancer_wrapper.go | 380 +
 .../grpc_binarylog_v1/binarylog.pb.go | 24 +-
 .../vendor/google.golang.org/grpc/call.go | 6 +-
 .../grpc/channelz/channelz.go | 36 +
 .../google.golang.org/grpc/clientconn.go | 1305 +-
 .../vendor/google.golang.org/grpc/codec.go | 8 +-
 .../grpc/codes/code_string.go | 51 +-
 .../google.golang.org/grpc/codes/codes.go | 8 +-
 .../grpc/credentials/credentials.go | 20 +-
 .../grpc/credentials/insecure/insecure.go | 26 +
 .../google.golang.org/grpc/credentials/tls.go | 81 +-
 .../google.golang.org/grpc/dialoptions.go | 239 +-
 .../grpc/encoding/encoding.go | 26 +-
 .../grpc/encoding/proto/proto.go | 4 +-
 .../grpc/grpclog/component.go | 40 +-
 .../google.golang.org/grpc/grpclog/grpclog.go | 30 +-
 .../google.golang.org/grpc/grpclog/logger.go | 30 +-
 .../grpc/grpclog/loggerv2.go | 65 +-
 .../google.golang.org/grpc/interceptor.go | 21 +-
 .../grpc/internal/backoff/backoff.go | 36 +
 .../balancer/gracefulswitch/gracefulswitch.go | 385 +
 .../grpc/internal/balancerload/load.go | 4 +-
 .../grpc/internal/binarylog/binarylog.go | 118 +-
 .../grpc/internal/binarylog/env_config.go | 26 +-
 .../grpc/internal/binarylog/method_logger.go | 165 +-
 .../grpc/internal/binarylog/sink.go | 12 +-
 .../grpc/internal/buffer/unbounded.go | 63 +-
 .../grpc/internal/channelz/funcs.go | 252 +-
 .../grpc/internal/channelz/id.go | 75 +
 .../grpc/internal/channelz/logging.go | 91 +-
 .../grpc/internal/channelz/types.go | 44 +-
 .../grpc/internal/channelz/util_linux.go | 2 +-
 .../grpc/internal/channelz/util_nonlinux.go | 2 +-
 .../grpc/internal/credentials/credentials.go | 8 +-
 .../grpc/internal/envconfig/envconfig.go | 46 +-
 .../grpc/internal/envconfig/observability.go | 42 +
 .../grpc/internal/envconfig/xds.go | 49 +-
 .../grpc/internal/experimental.go | 28 +
 .../grpc/internal/grpclog/grpclog.go | 42 +-
 .../grpc/internal/grpclog/prefixLogger.go | 20 +-
 .../grpc/internal/grpcrand/grpcrand.go | 28 +
 .../internal/grpcsync/callback_serializer.go | 100 +
 .../grpc/internal/grpcsync/oncefunc.go | 32 +
 .../grpc/internal/grpcsync/pubsub.go | 121 +
 .../grpc/internal/grpcutil/compressor.go | 47 +
 .../grpc/internal/grpcutil/method.go | 6 +-
 .../grpc/internal/idle/idle.go | 278 +
 .../grpc/internal/internal.go | 166 +-
 .../grpc/internal/metadata/metadata.go | 60 +-
 .../grpc/internal/pretty/pretty.go | 82 +
 .../grpc/internal/resolver/config_selector.go | 4 +-
 .../internal/resolver/dns/dns_resolver.go | 139 +-
 .../resolver/dns/internal/internal.go | 70 +
 .../resolver/passthrough/passthrough.go | 11 +-
 .../grpc/internal/resolver/unix/unix.go | 13 +-
 .../grpc/internal/serviceconfig/duration.go | 130 +
 .../internal/serviceconfig/serviceconfig.go | 8 +-
 .../grpc/internal/status/status.go | 46 +-
 .../grpc/internal/tcp_keepalive_others.go | 29 +
 .../grpc/internal/tcp_keepalive_unix.go | 54 +
 .../grpc/internal/tcp_keepalive_windows.go | 54 +
 .../grpc/internal/transport/controlbuf.go | 152 +-
 .../grpc/internal/transport/defaults.go | 6 +
 .../grpc/internal/transport/handler_server.go | 150 +-
 .../grpc/internal/transport/http2_client.go | 484 +-
 .../grpc/internal/transport/http2_server.go | 483 +-
 .../grpc/internal/transport/http_util.go | 137 +-
 .../grpc/internal/transport/logging.go | 40 +
 .../grpc/internal/transport/proxy.go | 14 +-
 .../grpc/internal/transport/transport.go | 81 +-
 .../grpc/metadata/metadata.go | 101 +-
 .../google.golang.org/grpc/peer/peer.go | 2 +
 .../google.golang.org/grpc/picker_wrapper.go | 92 +-
 .../google.golang.org/grpc/pickfirst.go | 204 +-
 .../google.golang.org/grpc/preloader.go | 4 +-
 .../google.golang.org/grpc/regenerate.sh | 26 +-
 .../grpc/resolver/dns/dns_resolver.go | 36 +
 .../google.golang.org/grpc/resolver/map.go | 174 +-
 .../grpc/resolver/resolver.go | 164 +-
 .../grpc/resolver_conn_wrapper.go | 187 -
 .../grpc/resolver_wrapper.go | 197 +
 .../vendor/google.golang.org/grpc/rpc_util.go | 159 +-
 .../vendor/google.golang.org/grpc/server.go | 1047 +-
 .../google.golang.org/grpc/service_config.go | 95 +-
 .../grpc/serviceconfig/serviceconfig.go | 2 +-
 .../grpc/shared_buffer_pool.go | 154 +
 .../google.golang.org/grpc/stats/stats.go | 36 +-
 .../google.golang.org/grpc/status/status.go | 67 +-
 .../vendor/google.golang.org/grpc/stream.go | 624 +-
 .../vendor/google.golang.org/grpc/tap/tap.go | 8 +-
 .../vendor/google.golang.org/grpc/trace.go | 6 +-
 .../vendor/google.golang.org/grpc/version.go | 2 +-
 .../vendor/google.golang.org/grpc/vet.sh | 207 +-
 .../vendor/google.golang.org/protobuf/AUTHORS | 3 -
 .../google.golang.org/protobuf/CONTRIBUTORS | 3 -
 .../protobuf/encoding/protojson/decode.go | 210 +-
 .../protobuf/encoding/protojson/doc.go | 4 +-
 .../protobuf/encoding/protojson/encode.go | 102 +-
 .../encoding/protojson/well_known_types.go | 143 +-
 .../protobuf/encoding/prototext/decode.go | 124 +-
 .../protobuf/encoding/prototext/encode.go | 57 +-
 .../protobuf/encoding/protowire/wire.go | 59 +-
 .../protobuf/internal/descfmt/stringer.go | 229 +-
 .../internal/encoding/defval/default.go | 78 +-
 .../protobuf/internal/encoding/json/decode.go | 2 +-
 .../protobuf/internal/encoding/json/encode.go | 10 +-
 .../encoding/messageset/messageset.go | 7 +-
 .../protobuf/internal/encoding/tag/tag.go | 96 +-
 .../protobuf/internal/encoding/text/decode.go | 37 +-
 .../internal/encoding/text/decode_number.go | 49 +-
 .../protobuf/internal/encoding/text/doc.go | 4 +-
 .../protobuf/internal/encoding/text/encode.go | 10 +-
 .../protobuf/internal/errors/is_go112.go | 1 +
 .../protobuf/internal/errors/is_go113.go | 1 +
 .../protobuf/internal/filedesc/build.go | 19 +-
 .../protobuf/internal/filedesc/desc.go | 427 +-
 .../protobuf/internal/filedesc/desc_init.go | 36 +-
 .../protobuf/internal/filedesc/desc_lazy.go | 80 +-
 .../protobuf/internal/filedesc/desc_list.go | 167 +-
 .../protobuf/internal/filedesc/placeholder.go | 136 +-
 .../protobuf/internal/filetype/build.go | 87 +-
 .../internal/flags/proto_legacy_disable.go | 1 +
 .../internal/flags/proto_legacy_enable.go | 1 +
 .../protobuf/internal/genid/descriptor_gen.go | 306 +-
 .../protobuf/internal/genid/type_gen.go | 6 +
 .../protobuf/internal/impl/api_export.go | 42 +-
 .../protobuf/internal/impl/checkinit.go | 12 +-
 .../protobuf/internal/impl/codec_extension.go | 36 +-
 .../protobuf/internal/impl/codec_field.go | 90 +-
 .../protobuf/internal/impl/codec_gen.go | 113 +-
 .../protobuf/internal/impl/codec_map.go | 20 +-
 .../protobuf/internal/impl/codec_map_go111.go | 1 +
 .../protobuf/internal/impl/codec_map_go112.go | 1 +
 .../protobuf/internal/impl/codec_message.go | 30 +-
 .../protobuf/internal/impl/codec_reflect.go | 1 +
 .../protobuf/internal/impl/codec_tables.go | 290 +-
 .../protobuf/internal/impl/codec_unsafe.go | 1 +
 .../protobuf/internal/impl/convert.go | 229 +-
 .../protobuf/internal/impl/convert_list.go | 42 +-
 .../protobuf/internal/impl/convert_map.go | 32 +-
 .../protobuf/internal/impl/decode.go | 29 +-
 .../protobuf/internal/impl/enum.go | 10 +-
 .../protobuf/internal/impl/extension.go | 26 +-
 .../protobuf/internal/impl/legacy_enum.go | 57 +-
 .../protobuf/internal/impl/legacy_export.go | 18 +-
 .../internal/impl/legacy_extension.go | 100 +-
 .../protobuf/internal/impl/legacy_message.go | 141 +-
 .../protobuf/internal/impl/merge.go | 32 +-
 .../protobuf/internal/impl/message.go | 58 +-
 .../protobuf/internal/impl/message_reflect.go | 74 +-
 .../internal/impl/message_reflect_field.go | 118 +-
 .../protobuf/internal/impl/pointer_reflect.go | 37 +
 .../protobuf/internal/impl/pointer_unsafe.go | 41 +
 .../protobuf/internal/impl/validate.go | 50 +-
 .../protobuf/internal/impl/weak.go | 16 +-
 .../protobuf/internal/order/order.go | 18 +-
 .../protobuf/internal/order/range.go | 22 +-
 .../protobuf/internal/strs/strings_pure.go | 1 +
 ...ings_unsafe.go => strings_unsafe_go120.go} | 11 +-
 .../internal/strs/strings_unsafe_go121.go | 74 +
 .../protobuf/internal/version/version.go | 56 +-
 .../protobuf/proto/decode.go | 22 +-
 .../google.golang.org/protobuf/proto/doc.go | 82 +-
 .../protobuf/proto/encode.go | 7 +-
 .../google.golang.org/protobuf/proto/equal.go | 178 +-
 .../protobuf/proto/extension.go | 2 +-
 .../google.golang.org/protobuf/proto/merge.go | 2 +-
 .../google.golang.org/protobuf/proto/proto.go | 18 +-
 .../protobuf/proto/proto_methods.go | 1 +
 .../protobuf/proto/proto_reflect.go | 1 +
 .../google.golang.org/protobuf/proto/size.go | 10 +-
 .../protobuf/reflect/protodesc/desc.go | 29 +-
 .../protobuf/reflect/protodesc/desc_init.go | 24 +
 .../reflect/protodesc/desc_resolve.go | 6 +-
 .../protobuf/reflect/protodesc/editions.go | 177 +
 .../reflect/protodesc/editions_defaults.binpb | 4 +
 .../protobuf/reflect/protodesc/proto.go | 18 +-
 .../protobuf/reflect/protoreflect/methods.go | 1 +
 .../protobuf/reflect/protoreflect/proto.go | 91 +-
 .../protobuf/reflect/protoreflect/source.go | 1 +
 .../reflect/protoreflect/source_gen.go | 93 +
 .../protobuf/reflect/protoreflect/type.go | 45 +-
 .../protobuf/reflect/protoreflect/value.go | 26 +-
 .../reflect/protoreflect/value_equal.go | 168 +
 .../reflect/protoreflect/value_pure.go | 1 +
 .../reflect/protoreflect/value_union.go | 65 +-
 ...{value_unsafe.go => value_unsafe_go120.go} | 3 +-
 .../protoreflect/value_unsafe_go121.go | 87 +
 .../reflect/protoregistry/registry.go | 28 +-
 .../protobuf/runtime/protoiface/methods.go | 1 +
 .../protobuf/runtime/protoimpl/version.go | 8 +-
 .../types/descriptorpb/descriptor.pb.go | 2739 +-
 .../protobuf/types/known/anypb/any.pb.go | 136 +-
 .../types/known/durationpb/duration.pb.go | 63 +-
 .../types/known/timestamppb/timestamp.pb.go | 63 +-
 event-exporter/vendor/modules.txt | 159 +-
 1022 files changed, 92875 insertions(+), 19728 deletions(-)
 create mode 100644 event-exporter/vendor/cloud.google.com/go/compute/internal/version.go
 create mode 100644 event-exporter/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
 rename event-exporter/vendor/{google.golang.org/genproto => cloud.google.com/go/compute/metadata}/LICENSE (100%)
 create mode 100644 event-exporter/vendor/cloud.google.com/go/compute/metadata/README.md
 create mode 100644 event-exporter/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
 create mode 100644 event-exporter/vendor/github.com/cespare/xxhash/v2/testall.sh
 create mode 100644 event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
 rename event-exporter/vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/.gitignore
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/LICENSE.txt
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/Makefile
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/README.md
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/capture_metrics.go
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/docs.go
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
 create mode 100644 event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/SECURITY.md
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/context.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/context_noslog.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/context_slog.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/funcr/funcr.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/funcr/slogsink.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/sloghandler.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/slogr.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/logr/slogsink.go
 create mode 100644 event-exporter/vendor/github.com/go-logr/stdr/LICENSE
 create mode 100644 event-exporter/vendor/github.com/go-logr/stdr/README.md
 create mode 100644 event-exporter/vendor/github.com/go-logr/stdr/stdr.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/glog_file_linux.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/glog_file_other.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/glog_file_posix.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/glog_flags.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
 create mode 100644 event-exporter/vendor/github.com/golang/glog/internal/stackdump/stackdump.go
 create mode 100644 event-exporter/vendor/github.com/golang/protobuf/jsonpb/decode.go
 create mode 100644 event-exporter/vendor/github.com/golang/protobuf/jsonpb/encode.go
 create mode 100644 event-exporter/vendor/github.com/golang/protobuf/jsonpb/json.go
 rename event-exporter/vendor/github.com/google/go-cmp/cmp/{export_unsafe.go => export.go} (94%)
 delete mode 100644 event-exporter/vendor/github.com/google/go-cmp/cmp/export_panic.go
 rename event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/{pointer_unsafe.go => pointer.go} (95%)
 delete mode 100644 event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
 delete mode 100644 event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/.gitignore
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/CONTRIBUTING.md
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/LICENSE.md
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/README.md
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/record.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/record/ticketsender.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/README.md
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/retry/retry.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/s2a.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/s2a_options.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/s2a_utils.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/stream/s2a_stream.go
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/client_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/client_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/server_cert.pem
 create mode 100644 event-exporter/vendor/github.com/google/s2a-go/testdata/server_key.pem
 create mode 100644 event-exporter/vendor/github.com/google/uuid/CHANGELOG.md
 create mode 100644 event-exporter/vendor/github.com/google/uuid/CONTRIBUTING.md
 create mode 100644 event-exporter/vendor/github.com/google/uuid/CONTRIBUTORS
 create mode 100644 event-exporter/vendor/github.com/google/uuid/LICENSE
 create mode 100644 event-exporter/vendor/github.com/google/uuid/README.md
 create mode 100644 event-exporter/vendor/github.com/google/uuid/dce.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/doc.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/hash.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/marshal.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/node.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/node_js.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/node_net.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/null.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/sql.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/time.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/util.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/uuid.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/version1.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/version4.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/version6.go
 create mode 100644 event-exporter/vendor/github.com/google/uuid/version7.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
 create mode 100644 event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/content_type.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/internal/version.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
 create mode 100644 event-exporter/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.codespellignore
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.codespellrc
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.gitattributes
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.gitignore
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.gitmodules
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.golangci.yml
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.lycheeignore
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/CHANGELOG.md
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/CODEOWNERS
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/LICENSE
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/Makefile
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/README.md
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/RELEASING.md
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/VERSIONING.md
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/encoder.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/filter.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/iterator.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/key.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/kv.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/set.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/type_string.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/attribute/value.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/baggage/baggage.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/baggage/context.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/baggage/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/codes/codes.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/codes/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/error_handler.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/handler.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/gen.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/handler.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/meter.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/state.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/global/trace.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/internal_logging.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/LICENSE
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/config.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/instrument.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/meter.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/metric/syncint64.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/propagation.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/propagation/baggage.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/propagation/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/propagation/propagation.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/requirements.txt
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/LICENSE
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/config.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/context.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/doc.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/noop.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/trace.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/trace/tracestate.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/verify_examples.sh
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/version.go
 create mode 100644 event-exporter/vendor/go.opentelemetry.io/otel/versions.yaml
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/LICENSE
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/PATENTS
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20/xor.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/cryptobyte/builder.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/cryptobyte/string.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/hkdf/hkdf.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/alias/alias.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
 delete mode 100644 event-exporter/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/go111.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/go115.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/go118.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/not_go111.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/not_go115.go
 delete mode 100644 event-exporter/vendor/golang.org/x/net/http2/not_go118.go
 delete mode 100644 event-exporter/vendor/golang.org/x/oauth2/AUTHORS
 delete mode 100644 event-exporter/vendor/golang.org/x/oauth2/CONTRIBUTORS
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/deviceauth.go
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/google/error.go
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go
 rename event-exporter/vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/clientauth.go (88%)
 rename event-exporter/vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/sts_exchange.go (68%)
 create mode 100644 event-exporter/vendor/golang.org/x/oauth2/pkce.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/byteorder.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_aix.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.s
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_loong64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.s
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_wasm.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_x86.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_x86.s
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_zos.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/endian_big.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/endian_little.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/hwcap_linux.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/parse.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
 create mode 100644 event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
 delete mode 100644 event-exporter/vendor/golang.org/x/time/AUTHORS
 delete mode 100644 event-exporter/vendor/golang.org/x/time/CONTRIBUTORS
 create mode 100644 event-exporter/vendor/golang.org/x/time/rate/sometimes.go
 create mode 100644 event-exporter/vendor/google.golang.org/api/internal/cba.go
 create mode 100644 event-exporter/vendor/google.golang.org/api/internal/cert/default_cert.go
 create mode 100644 event-exporter/vendor/google.golang.org/api/internal/cert/enterprise_cert.go
 rename event-exporter/vendor/google.golang.org/api/{transport/cert/default_cert.go => internal/cert/secureconnect_cert.go} (57%)
 create mode 100644 event-exporter/vendor/google.golang.org/api/internal/gensupport/error.go
 create mode 100644 event-exporter/vendor/google.golang.org/api/internal/s2a.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/option/credentials_go19.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/option/credentials_notgo19.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/http/configure_http2_go116.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/http/default_transport_go113.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/http/dial_appengine.go
 delete mode 100644 event-exporter/vendor/google.golang.org/api/transport/internal/dca/dca.go
 delete mode 100644 event-exporter/vendor/google.golang.org/appengine/.travis.yml
 delete mode 100644 event-exporter/vendor/google.golang.org/appengine/travis_install.sh
 delete mode 100644 event-exporter/vendor/google.golang.org/appengine/travis_test.sh
 create mode 100644 event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
 delete mode 100644 event-exporter/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/balancer_wrapper.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/channelz/channelz.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/channelz/id.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/envconfig/observability.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/experimental.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/idle/idle.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/pretty/pretty.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/internal/transport/logging.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 delete mode 100644 event-exporter/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/resolver_wrapper.go
 create mode 100644 event-exporter/vendor/google.golang.org/grpc/shared_buffer_pool.go
 delete mode 100644 event-exporter/vendor/google.golang.org/protobuf/AUTHORS
 delete mode 100644 event-exporter/vendor/google.golang.org/protobuf/CONTRIBUTORS
 rename event-exporter/vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (87%)
 create mode 100644 event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
 create mode 100644 event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
 create mode 100644 event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
 create mode 100644 event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
 rename event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%)
 create mode 100644 event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go

diff --git a/event-exporter/go.mod b/event-exporter/go.mod
index b81e363e6..309151a90 100644
--- a/event-exporter/go.mod
+++ b/event-exporter/go.mod
@@ -3,27 +3,33 @@ module github.com/GoogleCloudPlatform/k8s-stackdriver/event-exporter
 go 1.19
 
 require (
-	cloud.google.com/go/compute v1.5.0
-	github.com/golang/glog v1.0.0
-	github.com/google/go-cmp v0.5.7
+	cloud.google.com/go/compute/metadata v0.2.3
+	github.com/golang/glog v1.1.2
+	github.com/google/go-cmp v0.6.0
 	github.com/prometheus/client_golang v1.12.1
-	golang.org/x/net v0.17.0
-	google.golang.org/api v0.70.0
+	golang.org/x/net v0.21.0
+	google.golang.org/api v0.167.0
 	k8s.io/api v0.23.4
 	k8s.io/apimachinery v0.23.4
 	k8s.io/client-go v0.23.4
 )
 
 require (
+	cloud.google.com/go/compute v1.23.4 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.2 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.1.1 // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.1 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
@@ -32,16 +38,21 @@ require (
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
-	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
-	golang.org/x/sys v0.13.0 // indirect
-	golang.org/x/term v0.13.0 // indirect
-	golang.org/x/text v0.13.0 // indirect
-	golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 // indirect
-	google.golang.org/grpc v1.44.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect
+	go.opentelemetry.io/otel v1.23.0 // indirect
+	go.opentelemetry.io/otel/metric v1.23.0 // indirect
+	go.opentelemetry.io/otel/trace v1.23.0 // indirect
+	golang.org/x/crypto v0.19.0 // indirect
+	golang.org/x/oauth2 v0.17.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
+	golang.org/x/term v0.17.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
+	google.golang.org/grpc v1.61.1 // indirect
+	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/event-exporter/go.sum b/event-exporter/go.sum
index 18107a84d..934e17a34 100644
--- a/event-exporter/go.sum
+++ b/event-exporter/go.sum
@@ -18,25 +18,16 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
-cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
-cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM=
-cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
+cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -59,7 +50,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -67,17 +57,16 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -85,11 +74,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -102,11 +86,10 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -126,8 +109,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
@@ -137,8 +123,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -152,7 +138,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -169,9 +154,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
@@ -186,9 +171,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -196,7 +180,6 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -208,24 +191,24 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=
+github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
 github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
 github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -311,39 +294,51 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA=
+go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E=
+go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0=
+go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo=
+go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo=
+go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI=
+go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -352,6 +347,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -375,7 +373,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -387,6 +384,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -426,12 +424,11 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -444,12 +441,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -461,6 +455,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -511,29 +507,19 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -543,14 +529,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs=
-golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -600,11 +587,8 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -631,27 +615,17 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
 google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
 google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
-google.golang.org/api v0.70.0 h1:67zQnAE0T2rB0A3CwLSas0K+SbVzSxP+zTLkQLexeiw=
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE=
+google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -675,7 +649,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -693,32 +666,10 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 h1:U9V52f6rAgINH7kT+musA1qF8kWyVOxzF8eYuOVuFwQ= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= +google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -732,22 +683,13 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -760,8 +702,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -776,7 +719,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/event-exporter/vendor/cloud.google.com/go/compute/internal/version.go b/event-exporter/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 000000000..27a1970b9 --- /dev/null +++ b/event-exporter/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "1.23.4"
diff --git a/event-exporter/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/event-exporter/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
new file mode 100644
index 000000000..06b957349
--- /dev/null
+++ b/event-exporter/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -0,0 +1,19 @@
+# Changes
+
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
+
+## [0.1.0] (2022-10-26)
+
+Initial release of metadata being its own module.
diff --git a/event-exporter/vendor/google.golang.org/genproto/LICENSE b/event-exporter/vendor/cloud.google.com/go/compute/metadata/LICENSE
similarity index 100%
rename from event-exporter/vendor/google.golang.org/genproto/LICENSE
rename to event-exporter/vendor/cloud.google.com/go/compute/metadata/LICENSE
diff --git a/event-exporter/vendor/cloud.google.com/go/compute/metadata/README.md b/event-exporter/vendor/cloud.google.com/go/compute/metadata/README.md
new file mode 100644
index 000000000..f940fb2c8
--- /dev/null
+++ b/event-exporter/vendor/cloud.google.com/go/compute/metadata/README.md
@@ -0,0 +1,27 @@
+# Compute API
+
+[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata)
+
+This is a utility library for communicating with the Google Cloud metadata
+service on Google Cloud.
+
+## Install
+
+```bash
+go get cloud.google.com/go/compute/metadata
+```
+
+## Go Version Support
+
+See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
+section in the root directory's README.
+
+## Contributing
+
+Contributions are welcome. Please see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms. See
+[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/event-exporter/vendor/cloud.google.com/go/compute/metadata/metadata.go b/event-exporter/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 111309f3d..c17faa142 100644
--- a/event-exporter/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/event-exporter/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -16,7 +16,7 @@
 // metadata and API service accounts.
 //
 // This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
+// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } @@ -145,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/event-exporter/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/event-exporter/vendor/cloud.google.com/go/compute/metadata/tidyfix.go new file mode 100644 index 000000000..4cef48500 --- /dev/null +++ b/event-exporter/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package metadata + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go/compute/internal" diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/README.md b/event-exporter/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b..8bf0e5b78 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/README.md +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/testall.sh b/event-exporter/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000..94b9c4439 --- /dev/null +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash.go b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d54..a9e0d45c9 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf7..3e8b13257 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000..7e3145a22 --- /dev/null +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f..9216e0a40 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a82160..26df13bba 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a3..e86f1b5fd 100644 --- 
a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. diff --git a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e..1c1638fd8 100644 --- a/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/event-exporter/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/.gitignore b/event-exporter/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/event-exporter/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 000000000..e028b46a9 --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/Makefile b/event-exporter/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 000000000..4e12afdd9 --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/README.md b/event-exporter/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 000000000..cf6b42f3d --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. 
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	m := httpsnoop.CaptureMetrics(myH, w, r)
+	log.Printf(
+		"%s %s (code=%d dt=%s written=%d)",
+		r.Method,
+		r.URL,
+		m.Code,
+		m.Duration,
+		m.Written,
+	)
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or being called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the Go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
+
+However, hopefully the explanation above has sufficiently scared you off rolling
+your own solution to this problem. httpsnoop may still break your application,
+but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
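+
+Beyond `CaptureMetrics`, the hook mechanism can be used directly. Here is a
+minimal sketch of counting the bytes a handler writes (`countBytes` is an
+illustrative name, not an API of this package):
+
+```go
+// countBytes wraps h and logs how many bytes each response writes.
+func countBytes(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var written int64
+		hooks := httpsnoop.Hooks{
+			Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+				return func(p []byte) (int, error) {
+					n, err := next(p) // delegate to the underlying Write
+					written += int64(n)
+					return n, err
+				}
+			},
+		}
+		h.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
+		log.Printf("%s %s wrote %d bytes", r.Method, r.URL, written)
+	})
+}
+```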
+
+## Performance
+
+```
+BenchmarkBaseline-8         20000       94912 ns/op
+BenchmarkCaptureMetrics-8   20000       95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/event-exporter/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 000000000..bec7b71b3
--- /dev/null
+++ b/event-exporter/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+	// Code is the first http response code passed to the WriteHeader func of
+	// the ResponseWriter. If no such call is made, a default code of 200 is
+	// assumed instead.
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
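+//
+// Because the method adds to the existing field values, metrics can be
+// accumulated across several invocations. A minimal sketch (fn1 and fn2 are
+// illustrative stand-ins for functions that exercise the handler):
+//
+//	m := Metrics{Code: http.StatusOK}
+//	m.CaptureMetrics(w, fn1)
+//	m.CaptureMetrics(w, fn2) // Duration and Written now cover both calls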
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/docs.go b/event-exporter/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 000000000..203c35b3c --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 000000000..101cedde6 --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 000000000..e0951df15 --- /dev/null +++ b/event-exporter/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
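+//
+// As a minimal, illustrative sketch (the middleware name is hypothetical and
+// not part of this package), a caller might record the response status like so:
+//
+//	func withStatusLogging(next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			code := http.StatusOK // implicit status if WriteHeader is never called
+//			hooks := httpsnoop.Hooks{
+//				WriteHeader: func(fn httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+//					return func(c int) {
+//						code = c // record the status code
+//						fn(c)    // then delegate to the wrapped WriteHeader
+//					}
+//				},
+//			}
+//			next.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
+//			log.Printf("%s %s -> %d", r.Method, r.URL.Path, code)
+//		})
+//	}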
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/event-exporter/vendor/github.com/go-logr/logr/.golangci.yaml b/event-exporter/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/event-exporter/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/event-exporter/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/event-exporter/vendor/github.com/go-logr/logr/README.md b/event-exporter/vendor/github.com/go-logr/logr/README.md index ad825f5f0..8969526a6 100644 --- a/event-exporter/vendor/github.com/go-logr/logr/README.md +++ b/event-exporter/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +74,30 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. 
+When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -105,14 +130,115 @@ with higher verbosity means more (and less important) logs will be generated. There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. 
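+
+To make the two directions concrete, here is a minimal sketch (illustrative,
+not normative; it assumes Go 1.21+ for `log/slog`):
+
+```go
+package main
+
+import (
+	"log/slog"
+	"os"
+
+	"github.com/go-logr/logr"
+)
+
+func main() {
+	// Drive an existing slog.Handler through the logr API.
+	logger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stdout, nil))
+	logger.Info("hello via logr", "backend", "slog")
+
+	// Expose the same logr.Logger to code that expects the slog API.
+	slogger := slog.New(logr.ToSlogHandler(logger))
+	slogger.Info("hello via slog", "backend", "logr")
+}
+```
+
+The choice of `slog.NewJSONHandler` here is only an example; any `slog.Handler`
+or logr sink works the same way.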
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+  through `slog.Logger`, but may be wrong in other cases. That's because a
+  `logr.Sink` does its own stack unwinding instead of using the program counter
+  provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+  loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+  used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+  because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+  pair with the group names, separated by a dot. For structured output like
+  JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+  by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+  counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+  attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+  and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.LogValuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for a `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`.
`FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ @@ -237,7 +363,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/event-exporter/vendor/github.com/go-logr/logr/SECURITY.md b/event-exporter/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 000000000..1ca756fc7 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/event-exporter/vendor/github.com/go-logr/logr/context.go b/event-exporter/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 000000000..de8bcc3ad --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/event-exporter/vendor/github.com/go-logr/logr/context_noslog.go b/event-exporter/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 000000000..f012f9a18 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/event-exporter/vendor/github.com/go-logr/logr/context_slog.go b/event-exporter/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 000000000..065ef0b82 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/event-exporter/vendor/github.com/go-logr/logr/discard.go b/event-exporter/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/event-exporter/vendor/github.com/go-logr/logr/discard.go +++ b/event-exporter/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/event-exporter/vendor/github.com/go-logr/logr/funcr/funcr.go b/event-exporter/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 000000000..fb2f866f4 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,911 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + + // Verbosity tells funcr which V logs to produce. 
Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. 
+func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []any + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') // for the whole line + } + + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + + if f.parentValuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.parentValuesStr) + continuing = true + } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + continuing = true + } + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups + } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole line + } + + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). 
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	copied := false
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			if !copied {
+				newList := make([]any, len(kvList))
+				copy(newList, kvList)
+				kvList = newList
+				copied = true
+			}
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 || continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(f.comma())
+			} else {
+				// In theory the format could be something we don't understand. In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		buf.WriteString(f.quoted(k, escapeKeys))
+		buf.WriteByte(f.colon())
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+	if escape {
+		return prettyString(str)
+	}
+	// this is faster
+	return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+	if f.outputFormat == outputJSON {
+		return ','
+	}
+	return ' '
+}
+
+func (f Formatter) colon() byte {
+	if f.outputFormat == outputJSON {
+		return ':'
+	}
+	return '='
+}
+
+func (f Formatter) pretty(value any) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(f.comma()) + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(f.colon()) + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + printComma := false // testing i>0 is not enough because of JSON omitted fields + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(f.comma())
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputting as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(group string) {
+	// Unnamed groups are just inlined.
+	if group == "" {
+		return
+	}
+
+	// Any saved values can no longer be changed.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + continuing := false + + if f.parentValuesStr != "" { + buf.WriteString(f.parentValuesStr) + continuing = true + } + + if f.group != "" && f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + } + + // NOTE: We don't close the scope here - that's done later, when a log line + // is actually rendered (because we have N scopes to close). + + f.parentValuesStr = buf.String() + + // Start collecting new values. + f.group = group + f.groupDepth++ + f.valuesStr = "" + f.values = nil +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr any + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. 
+func (f *Formatter) AddValues(kvList []any) {
+	// Three slice args forces a copy.
+	n := len(f.values)
+	f.values = append(f.values[:n:n], kvList...)
+
+	vals := f.values
+	if hook := f.opts.RenderValuesHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+
+	// Pre-render values, so we don't have to do it on each Info/Error call.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/event-exporter/vendor/github.com/go-logr/logr/funcr/slogsink.go b/event-exporter/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 000000000..7bd84761e
--- /dev/null
+++ b/event-exporter/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, kvList)
+		return true
+	})
+
+	if record.Level >= slog.LevelError {
+		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	kvList := make([]any, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = attrToKVs(attr, kvList)
+	}
+	l.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+	l.startGroup(name)
+	return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, grpKVs)
+		}
+		if attr.Key == "" {
+			// slog says we have to inline these
+			kvList = append(kvList, grpKVs...)
+		} else {
+			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+		}
+	} else if attr.Key != "" {
+		kvList = append(kvList, attr.Key, attrVal.Any())
+	}
+
+	return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+	result := -level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/event-exporter/vendor/github.com/go-logr/logr/logr.go b/event-exporter/vendor/github.com/go-logr/logr/logr.go
index c05482a20..b4428e105 100644
--- a/event-exporter/vendor/github.com/go-logr/logr/logr.go
+++ b/event-exporter/vendor/github.com/go-logr/logr/logr.go
@@ -21,7 +21,7 @@ limitations under the License.
 // to back that API. Packages in the Go ecosystem can depend on this package,
 // while callers can implement logging with whatever backend is appropriate.
 //
-// Usage
+// # Usage
 //
 // Logging is done using a Logger instance. Logger is a concrete type with
 // methods, which defers the actual logging to a LogSink interface. The main
@@ -30,16 +30,20 @@ limitations under the License.
 // "structured logging".
 //
 // With Go's standard log package, we might write:
-//   log.Printf("setting target value %s", targetValue)
+//
+//	log.Printf("setting target value %s", targetValue)
 //
 // With logr's structured logging, we'd write:
-//   logger.Info("setting target", "value", targetValue)
+//
+//	logger.Info("setting target", "value", targetValue)
 //
 // Errors are much the same. Instead of:
-//   log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+//	log.Printf("failed to open the pod bay door for user %s: %v", user, err)
 //
 // We'd write:
-//   logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+//	logger.Error(err, "failed to open the pod bay door", "user", user)
 //
 // Info() and Error() are very similar, but they are separate methods so that
 // LogSink implementations can choose to do things like attach additional
@@ -47,7 +51,7 @@ limitations under the License.
 // always logged, regardless of the current verbosity. If there is no error
 // instance available, passing nil is valid.
 //
-// Verbosity
+// # Verbosity
 //
 // Often we want to log information only when the application is in "verbose
 // mode". To write log lines that are more verbose, Logger has a V() method.
@@ -58,20 +62,22 @@ limitations under the License.
 // Error messages do not have a verbosity level and are always logged.
 //
 // Where we might have written:
-//   if flVerbose >= 2 {
-//       log.Printf("an unusual thing happened")
-//   }
+//
+//	if flVerbose >= 2 {
+//		log.Printf("an unusual thing happened")
+//	}
 //
 // We can write:
-//   logger.V(2).Info("an unusual thing happened")
 //
-// Logger Names
+//	logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
 //
 // Logger instances can have name strings so that all messages logged through
 // that instance have additional context. For example, you might want to add
 // a subsystem name:
 //
-//   logger.WithName("compactor").Info("started", "time", time.Now())
+//	logger.WithName("compactor").Info("started", "time", time.Now())
 //
 // The WithName() method returns a new Logger, which can be passed to
 // constructors or other functions for further use. Repeated use of WithName()
@@ -82,25 +88,27 @@
 // joining operation (e.g. whitespace, commas, periods, slashes, brackets,
 // quotes, etc).
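 //
 // For example (an illustrative sketch, not part of the upstream docs), name
 // segments added by WithName are joined with '/':
 //
 //	workerLogger := mainLogger.WithName("pool").WithName("worker")
 //	workerLogger.Info("spawned") // attributed to the logger named "pool/worker"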
 //
-// Saved Values
+// # Saved Values
 //
 // Logger instances can store any number of key/value pairs, which will be
 // logged alongside all messages logged through that instance. For example,
 // you might want to create a Logger instance per managed object:
 //
 // With the standard log package, we might write:
-//   log.Printf("decided to set field foo to value %q for object %s/%s",
-//       targetValue, object.Namespace, object.Name)
+//
+//	log.Printf("decided to set field foo to value %q for object %s/%s",
+//		targetValue, object.Namespace, object.Name)
 //
 // With logr we'd write:
-//   // Elsewhere: set up the logger to log the object name.
-//   obj.logger = mainLogger.WithValues(
-//       "name", obj.name, "namespace", obj.namespace)
 //
-//   // later on...
-//   obj.logger.Info("setting foo", "value", targetValue)
+//	// Elsewhere: set up the logger to log the object name.
+//	obj.logger = mainLogger.WithValues(
+//		"name", obj.name, "namespace", obj.namespace)
+//
+//	// later on...
+//	obj.logger.Info("setting foo", "value", targetValue)
 //
-// Best Practices
+// # Best Practices
 //
 // Logger has very few hard rules, with the goal that LogSink implementations
 // might have a lot of freedom to differentiate. There are, however, some
@@ -115,15 +123,24 @@
 // may be any Go value, but how the value is formatted is determined by the
 // LogSink implementation.
 //
-// Key Naming Conventions
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it; the methods
+// will never crash. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
+// # Key Naming Conventions
 //
 // Keys are not strictly required to conform to any specification or regex, but
 // it is recommended that they:
-//   * be human-readable and meaningful (not auto-generated or simple ordinals)
-//   * be constant (not dependent on input data)
-//   * contain only printable characters
-//   * not contain whitespace or punctuation
-//   * use lower case for simple keys and lowerCamelCase for more complex ones
+//   - be human-readable and meaningful (not auto-generated or simple ordinals)
+//   - be constant (not dependent on input data)
+//   - contain only printable characters
+//   - not contain whitespace or punctuation
+//   - use lower case for simple keys and lowerCamelCase for more complex ones
 //
 // These guidelines help ensure that log data is processed properly regardless
 // of the log implementation. For example, log implementations will try to
@@ -132,51 +149,54 @@
 // While users are generally free to use key names of their choice, it's
 // generally best to avoid using the following keys, as they're frequently used
 // by implementations:
-//   * "caller": the calling information (file/line) of a particular log line
-//   * "error": the underlying error value in the `Error` method
-//   * "level": the log level
-//   * "logger": the name of the associated logger
-//   * "msg": the log message
-//   * "stacktrace": the stack trace associated with a particular log line or
-//                   error (often from the `Error` message)
-//   * "ts": the timestamp for a log line
+//   - "caller": the calling information (file/line) of a particular log line
+//   - "error": the underlying error value in the `Error` method
+//   - "level": the log level
+//   - "logger": the name of the associated logger
+//   - "msg": the log message
+//   - "stacktrace": the stack trace associated with a particular log line or
+//     error (often from the `Error` message)
+//   - "ts": the timestamp for a log line
 //
 // Implementations are encouraged to make use of these keys to represent the
 // above concepts, when necessary (for example, in a pure-JSON output form, it
 // would be necessary to represent at least message and timestamp as ordinary
 // named values).
 //
-// Break Glass
+// # Break Glass
 //
 // Implementations may choose to give callers access to the underlying
 // logging implementation. The recommended pattern for this is:
-//   // Underlier exposes access to the underlying logging implementation.
-//   // Since callers only have a logr.Logger, they have to know which
-//   // implementation is in use, so this interface is less of an abstraction
-//   // and more of way to test type conversion.
-//   type Underlier interface {
-//       GetUnderlying() <underlying-type>
-//   }
+//
+//	// Underlier exposes access to the underlying logging implementation.
+//	// Since callers only have a logr.Logger, they have to know which
+//	// implementation is in use, so this interface is less of an abstraction
+//	// and more of a way to test type conversion.
+//	type Underlier interface {
+//		GetUnderlying() <underlying-type>
+//	}
 //
 // Logger grants access to the sink to enable type assertions like this:
-//   func DoSomethingWithImpl(log logr.Logger) {
-//       if underlier, ok := log.GetSink()(impl.Underlier) {
-//           implLogger := underlier.GetUnderlying()
-//           ...
-//       }
-//   }
+//
+//	func DoSomethingWithImpl(log logr.Logger) {
+//		if underlier, ok := log.GetSink().(impl.Underlier); ok {
+//			implLogger := underlier.GetUnderlying()
+//			...
+//		}
+//	}
 //
 // Custom `With*` functions can be implemented by copying the complete
 // Logger struct and replacing the sink in the copy:
-//   // WithFooBar changes the foobar parameter in the log sink and returns a
-//   // new logger with that modified sink. It does nothing for loggers where
-//   // the sink doesn't support that parameter.
-//   func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-//      if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
-//         log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-//      }
-//      return log
-//   }
+//
+//	// WithFooBar changes the foobar parameter in the log sink and returns a
+//	// new logger with that modified sink. It does nothing for loggers where
+//	// the sink doesn't support that parameter.
+//	func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+//		if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+//			log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+//		}
+//		return log
+//	}
 //
 // Don't use New to construct a new Logger with a LogSink retrieved from an
 // existing Logger.
Source code attribution might not work correctly and @@ -187,16 +207,15 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -235,7 +254,13 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -244,8 +269,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. -func (l Logger) Info(msg string, keysAndValues ...interface{}) { - if l.Enabled() { +func (l Logger) Info(msg string, keysAndValues ...any) { + if l.sink == nil { + return + } + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -263,7 +291,10 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -275,6 +306,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -282,9 +316,19 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. 
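 //
 // For example (an illustrative sketch, not from the upstream docs):
 //
 //	objLogger := logger.WithValues("namespace", ns, "name", name)
 //	objLogger.Info("reconciled") // carries both saved key/value pairs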
-func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+	if l.sink == nil {
+		return l
+	}
 	l.setSink(l.sink.WithValues(keysAndValues...))
 	return l
 }
@@ -295,6 +339,9 @@
 // contain only letters, digits, and hyphens (see the package documentation for
 // more information).
 func (l Logger) WithName(name string) Logger {
+	if l.sink == nil {
+		return l
+	}
 	l.setSink(l.sink.WithName(name))
 	return l
 }
@@ -315,6 +362,9 @@
 // WithCallDepth(1) because it works with implementations that support the
 // CallDepthLogSink and/or CallStackHelperLogSink interfaces.
 func (l Logger) WithCallDepth(depth int) Logger {
+	if l.sink == nil {
+		return l
+	}
 	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
 		l.setSink(withCallDepth.WithCallDepth(depth))
 	}
@@ -336,6 +386,9 @@
 // implementation does not support either of these, the original Logger will be
 // returned.
 func (l Logger) WithCallStackHelper() (func(), Logger) {
+	if l.sink == nil {
+		return func() {}, l
+	}
 	var helper func()
 	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
 		l.setSink(withCallDepth.WithCallDepth(1))
@@ -348,43 +401,9 @@
 	return helper, l
 }
 
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v, nil
-	}
-
-	return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
-	return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
-	return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v
-	}
-
-	return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
-	return context.WithValue(ctx, contextKey{}, logger)
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+	return l.sink == nil
 }
 
 // RuntimeInfo holds information that the logr "core" library knows which
@@ -418,22 +437,22 @@ type LogSink interface {
 	// The level argument is provided for optional logging. This method will
 	// only be called when Enabled(level) is true. See Logger.Info for more
 	// details.
-	Info(level int, msg string, keysAndValues ...interface{})
+	Info(level int, msg string, keysAndValues ...any)
 
 	// Error logs an error, with the given message and key/value pairs as
 	// context. See Logger.Error for more details.
-	Error(err error, msg string, keysAndValues ...interface{})
+	Error(err error, msg string, keysAndValues ...any)
 
 	// WithValues returns a new LogSink with additional key/value pairs. See
 	// Logger.WithValues for more details.
-	WithValues(keysAndValues ...interface{}) LogSink
+	WithValues(keysAndValues ...any) LogSink
 
 	// WithName returns a new LogSink with the specified name appended. See
 	// Logger.WithName for more details.
WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -458,7 +477,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. @@ -497,5 +516,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. - MarshalLog() interface{} + MarshalLog() any } diff --git a/event-exporter/vendor/github.com/go-logr/logr/sloghandler.go b/event-exporter/vendor/github.com/go-logr/logr/sloghandler.go new file mode 100644 index 000000000..82d1ba494 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/logr/sloghandler.go @@ -0,0 +1,192 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. 
+
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		return true
+	})
+	if record.Level >= slog.LevelError {
+		l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+	if sink, ok := l.sink.(CallDepthLogSink); ok {
+		return sink.WithCallDepth(2)
+	}
+	return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	if l.sink == nil || len(attrs) == 0 {
+		return l
+	}
+
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithAttrs(attrs)
+		clone.sink = clone.slogSink
+	} else {
+		kvList := make([]any, 0, 2*len(attrs))
+		for _, attr := range attrs {
+			kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		}
+		clone.sink = l.sink.WithValues(kvList...)
+	}
+	return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+	if l.sink == nil {
+		return l
+	}
+	if name == "" {
+		// slog says to inline empty groups
+		return l
+	}
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithGroup(name)
+		clone.sink = clone.slogSink
+	} else {
+		clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+	}
+	return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		prefix := groupPrefix
+		if attr.Key != "" {
+			prefix = addPrefix(groupPrefix, attr.Key)
+		}
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, prefix, grpKVs)
+		}
+		kvList = append(kvList, grpKVs...)
+	} else if attr.Key != "" {
+		kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+	}
+
+	return kvList
+}
+
+func addPrefix(prefix, name string) string {
+	if prefix == "" {
+		return name
+	}
+	if name == "" {
+		return prefix
+	}
+	return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+	result := -level
+	result += l.levelBias // in case the original Logger had a V level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/event-exporter/vendor/github.com/go-logr/logr/slogr.go b/event-exporter/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 000000000..28a83d024
--- /dev/null
+++ b/event-exporter/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+	if handler, ok := handler.(*slogHandler); ok {
+		if handler.sink == nil {
+			return Discard()
+		}
+		return New(handler.sink).V(int(handler.levelBias))
+	}
+	return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+//	logger := <some Logger with 0 as verbosity level>
+//	slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+//	slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+//	slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+		return sink.handler
+	}
+
+	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+	if slogSink, ok := handler.sink.(SlogSink); ok {
+		handler.slogSink = slogSink
+	}
+	return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+//     as intended by slog
+//   - proper grouping of key/value pairs via WithGroup
+//   - verbosity levels > slog.LevelInfo can be recorded
+//   - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+	LogSink
+
+	Handle(ctx context.Context, record slog.Record) error
+	WithAttrs(attrs []slog.Attr) SlogSink
+	WithGroup(name string) SlogSink
+}
diff --git a/event-exporter/vendor/github.com/go-logr/logr/slogsink.go b/event-exporter/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 000000000..4060fcbc2
--- /dev/null
+++ b/event-exporter/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+	"runtime"
+	"time"
+)
+
+var (
+	_ LogSink          = &slogSink{}
+	_ CallDepthLogSink = &slogSink{}
+	_ Underlier        = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+	// GetUnderlying returns the Handler used by the LogSink.
+	GetUnderlying() slog.Handler
+}
+
+const (
+	// nameKey is used to log the `WithName` values as an additional attribute.
+	nameKey = "logger"
+
+	// errKey is used to log the error parameter of Error as an additional attribute.
+	errKey = "err"
+)
+
+type slogSink struct {
+	callDepth int
+	name      string
+	handler   slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+	l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+	return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+	newLogger := *l
+	newLogger.callDepth += depth
+	return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+	return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+	l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+	l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+	var pcs [1]uintptr
+	// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
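+	// For illustration, the assumed frame layout when called without extra
+	// wrappers (a sketch, not upstream commentary):
+	//   0: runtime.Callers  1: log  2: Info/Error  3: the caller to attribute;
+	// l.callDepth shifts the recorded frame further up for WithCallDepth users.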
+ runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + _ = l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) LogSink { + if l.name != "" { + l.name += "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) + attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/event-exporter/vendor/github.com/go-logr/stdr/LICENSE b/event-exporter/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/event-exporter/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/event-exporter/vendor/github.com/go-logr/stdr/README.md b/event-exporter/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 000000000..515866789
--- /dev/null
+++ b/event-exporter/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/event-exporter/vendor/github.com/go-logr/stdr/stdr.go b/event-exporter/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 000000000..93a8aab51
--- /dev/null
+++ b/event-exporter/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API). Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
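+	//
+	// For example (a sketch, not upstream text): the standard *log.Logger
+	// already satisfies this interface, which is what New relies on when it
+	// falls back to log.New(os.Stderr, ...):
+	//
+	//	var _ StdLogger = (*log.Logger)(nil)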
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to test
+// type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
diff --git a/event-exporter/vendor/github.com/golang/glog/glog.go b/event-exporter/vendor/github.com/golang/glog/glog.go
index 718c34f88..dd0ed1015 100644
--- a/event-exporter/vendor/github.com/golang/glog/glog.go
+++ b/event-exporter/vendor/github.com/golang/glog/glog.go
@@ -1,6 +1,6 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+// Go support for leveled logs, analogous to https://github.com/google/glog.
 //
-// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2023 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -50,15 +50,15 @@
 //		Log files will be written to this directory instead of the
 //		default temporary directory.
 //
-//	Other flags provide aids to debugging.
+//	Other flags provide aids to debugging.
 //
 //	-log_backtrace_at=""
-//		When set to a file and line number holding a logging statement,
-//		such as
+//		A comma-separated list of file and line numbers holding a logging
+//		statement, such as
 //			-log_backtrace_at=gopherflakes.go:234
-//		a stack trace will be written to the Info log whenever execution
-//		hits that statement. (Unlike with -vmodule, the ".go" must be
-//		present.)
+//		A stack trace will be written to the Info log whenever execution
+//		hits one of these statements. (Unlike with -vmodule, the ".go"
+//		must be present.)
 //	-v=0
 //		Enable V-leveled logging at the specified level.
 //	-vmodule=""
@@ -66,100 +66,46 @@
 //		where pattern is a literal file name (minus the ".go" suffix) or
 //		"glob" pattern and N is a V level. For instance,
 //			-vmodule=gopher*=3
-//		sets the V level to 3 in all Go files whose names begin "gopher".
-//
+//		sets the V level to 3 in all Go files whose names begin with "gopher",
+//		and
+//			-vmodule=/path/to/glog/glog_test=1
+//		sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
+// If a glob pattern contains a slash, it is matched against the full path, +// and the file name. Otherwise, the pattern is +// matched only against the file's basename. When both -vmodule and -v +// are specified, the -vmodule values take precedence for the specified +// modules. package glog +// This file contains the parts of the log package that are shared among all +// implementations (file, envelope, and appengine). + import ( - "bufio" "bytes" "errors" - "flag" "fmt" - "io" stdLog "log" "os" - "path/filepath" + "reflect" "runtime" + "runtime/pprof" "strconv" - "strings" "sync" "sync/atomic" "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 + "github.com/golang/glog/internal/logsink" + "github.com/golang/glog/internal/stackdump" ) -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} +var timeNow = time.Now // Stubbed out for testing. -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} +// ErrNoLog is the error we return if no log file has yet been created +// for the specified log type. +var ErrNoLog = errors.New("log file not yet created") // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { @@ -183,724 +129,99 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [...]*OutputStats{ + logsink.Info: &Stats.Info, + logsink.Warning: &Stats.Warning, + logsink.Error: &Stats.Error, + logsink.Fatal: nil, } -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. 
-// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. +// Level specifies a level of verbosity for V logs. The -v flag is of type +// Level and should be modified only through the flag.Value interface. type Level int32 -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") +var metaPool sync.Pool // Pool of *logsink.Meta. -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) +// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a +// pointer, allocating a new one if necessary. 
(Returning the interface value +// directly avoids an allocation if there was an existing pointer in the pool.) +func metaPoolGet() (any, *logsink.Meta) { + if metai := metaPool.Get(); metai != nil { + return metai, metai.(*logsink.Meta) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + meta := new(logsink.Meta) + return meta, meta } -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} +type stack bool -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. 
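metaPoolGet above returns the pooled value both as the interface it was stored under and as a typed pointer, so the matching metaPool.Put(metai) can reuse the original interface value instead of re-boxing the pointer. A standalone sketch of the same sync.Pool idiom (the meta type here is a stand-in, not glog's internal logsink.Meta):

package main

import (
	"fmt"
	"sync"
)

type meta struct {
	file string
	line int
}

var pool sync.Pool

// get returns the pooled value as both interface and concrete pointer;
// handing the same interface back to Put avoids a fresh allocation.
func get() (any, *meta) {
	if mi := pool.Get(); mi != nil {
		return mi, mi.(*meta)
	}
	m := new(meta)
	return m, m
}

func main() {
	mi, m := get()
	m.file, m.line = "main.go", 42
	fmt.Printf("%+v\n", *m)
	pool.Put(mi) // recycle; a later get may hand this same *meta back
}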
-type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. +const ( + noStack = stack(false) + withStack = stack(true) +) -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. 
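The removed getBuffer/putBuffer pair above maintained a hand-rolled free list of header buffers under a dedicated mutex, capping recycled buffers at 256 bytes; the rewritten package retires this in favor of the sync.Pool used for logsink.Meta. A self-contained sketch of that free-list pattern:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// buffer embeds bytes.Buffer plus an intrusive next pointer so free buffers
// form a singly linked list without extra allocations.
type buffer struct {
	bytes.Buffer
	next *buffer
}

var (
	freeMu   sync.Mutex
	freeList *buffer
)

func getBuffer() *buffer {
	freeMu.Lock()
	b := freeList
	if b != nil {
		freeList = b.next
	}
	freeMu.Unlock()
	if b == nil {
		return new(buffer)
	}
	b.next = nil
	b.Reset()
	return b
}

func putBuffer(b *buffer) {
	if b.Len() >= 256 {
		return // let big buffers die a natural death
	}
	freeMu.Lock()
	b.next = freeList
	freeList = b
	freeMu.Unlock()
}

func main() {
	b := getBuffer()
	fmt.Fprint(b, "a formatted header would go here")
	fmt.Println(b.String())
	putBuffer(b)
}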
-The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) +func appendBacktrace(depth int, format string, args []any) (string, []any) { + // Capture a backtrace as a stackdump.Stack (both text and PC slice). + // Structured log sinks can extract the backtrace in whichever format they + // prefer (PCs or text), and Text sinks will include it as just another part + // of the log message. + // + // Use depth instead of depth+1 so that the backtrace always includes the + // log function itself - otherwise the reason for the trace appearing in the + // log may not be obvious to the reader. + dump := stackdump.Caller(depth) + + // Add an arg and an entry in the format string for the stack dump. + // + // Copy the "args" slice to avoid a rare but serious aliasing bug + // (corrupting the caller's slice if they passed it to a non-Fatal call + // using "..."). + format = format + "\n\n%v\n" + args = append(append([]any(nil), args...), dump) + + return format, args +} + +// logf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + now := timeNow() + _, file, line, ok := runtime.Caller(depth + 1) if !ok { file = "???" line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } } - return l.formatHeader(s, file, line), file, line -} -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. + if stack == withStack || backtraceAt(file, line) { + format, args = appendBacktrace(depth+1, format, args) } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. 
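The tiny digit helpers that follow exist because this header sits on the hot path; the comment above pegs the hand-rolled version at roughly 3x the speed of Fprintf. For readability, an equivalent fmt-based rendering of the same Lmmdd header layout (a sketch, not the library's implementation):

package main

import (
	"fmt"
	"os"
	"time"
)

// header renders "Lmmdd hh:mm:ss.uuuuuu threadid file:line] " with fmt verbs
// instead of hand-written digit formatting.
func header(sev byte, t time.Time, file string, line int) string {
	_, month, day := t.Date()
	hour, min, sec := t.Clock()
	return fmt.Sprintf("%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] ",
		sev, int(month), day, hour, min, sec,
		t.Nanosecond()/1000, os.Getpid(), file, line)
}

func main() {
	fmt.Println(header('I', time.Now(), "main.go", 42) + "hello")
}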
- -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: now, + File: file, + Line: line, + Depth: depth + 1, + Severity: severity, + Verbose: verbose, + Thread: int64(pid), } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) + sinkf(meta, format, args...) + metaPool.Put(metai) } -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. 
- if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { +func sinkf(meta *logsink.Meta, format string, args ...any) { + meta.Depth++ + n, err := logsink.Printf(meta, format, args...) + if stats := severityStats[meta.Severity]; stats != nil { atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) + atomic.AddInt64(&stats.bytes, int64(n)) } -} -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
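The syncBuffer defined next wraps a bufio.Writer around the current log file and rotates to a fresh file whenever a write would push nbytes past MaxSize. A standalone sketch of that size-triggered rotation policy (the app.log.N naming is illustrative):

package main

import (
	"fmt"
	"os"
)

// rotateWriter starts a new file once the current one would exceed max bytes.
type rotateWriter struct {
	f      *os.File
	nbytes uint64
	max    uint64
	seq    int
}

func (w *rotateWriter) Write(p []byte) (int, error) {
	if w.f == nil || w.nbytes+uint64(len(p)) >= w.max {
		if w.f != nil {
			w.f.Close()
		}
		w.seq++
		f, err := os.Create(fmt.Sprintf("app.log.%d", w.seq))
		if err != nil {
			return 0, err
		}
		w.f, w.nbytes = f, 0
	}
	n, err := w.f.Write(p)
	w.nbytes += uint64(n)
	return n, err
}

func main() {
	w := &rotateWriter{max: 64} // tiny limit so the demo rotates quickly
	for i := 0; i < 5; i++ {
		fmt.Fprintf(w, "line %d: some log output\n", i)
	}
	if w.f != nil {
		w.f.Close()
	}
}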
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } + logsink.Printf(meta, "glog: exiting because of error: %s", err) + sinks.file.Flush() + os.Exit(2) } } @@ -912,9 +233,9 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. 
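In application code the bridge is used like this; NewStandardLogger is new in this version, while CopyStandardLogTo predates it (a sketch; the messages are illustrative):

package main

import (
	"flag"
	stdlog "log"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	defer glog.Flush()

	// Route the standard library's default logger into glog's INFO log.
	glog.CopyStandardLogTo("INFO")
	stdlog.Print("sent via the default std logger")

	// Or obtain a dedicated *log.Logger bound to the WARNING severity.
	warn := glog.NewStandardLogger("WARNING")
	warn.Print("sent via a dedicated std logger")
}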
func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) } // Set a log format that captures the user's file and line: // d.go:23: message @@ -922,9 +243,22 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the Google logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge logsink.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -946,36 +280,72 @@ func (lb logBridge) Write(b []byte) (n int, err error) { line = 1 } } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) + + // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk + // up the stack looking for src/log/log.go but that seems like it would be + // unfortunately slow. + const stdLogDepth = 4 + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: timeNow(), + File: file, + Line: line, + Depth: stdLogDepth, + Severity: logsink.Severity(lb), + Thread: int64(pid), + } + + format := "%s" + args := []any{text} + if backtraceAt(file, line) { + format, args = appendBacktrace(meta.Depth, format, args) + } + + sinkf(meta, format, args...) + metaPool.Put(metai) + return len(b), nil } -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] +// defaultFormat returns a fmt.Printf format specifier that formats its +// arguments as if they were passed to fmt.Print. +func defaultFormat(args []any) string { + n := len(args) + switch n { + case 0: + return "" + case 1: + return "%v" + } + + b := make([]byte, 0, n*3-1) + wasString := true // Suppress leading space. + for _, arg := range args { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + if wasString || isString { + b = append(b, "%v"...) + } else { + b = append(b, " %v"...) + } + wasString = isString } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] + return string(b) +} + +// lnFormat returns a fmt.Printf format specifier that formats its arguments +// as if they were passed to fmt.Println. 
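defaultFormat above reproduces fmt.Print's spacing rule - a space between operands only when neither is a string - while lnFormat, defined next, mirrors fmt.Println. A small demonstration restating defaultFormat so its output can be compared against fmt.Print directly:

package main

import (
	"fmt"
	"reflect"
)

// defaultFormat restates the helper above: one %v per argument, with a space
// only between two adjacent non-string operands.
func defaultFormat(args []any) string {
	b := make([]byte, 0, len(args)*3)
	wasString := true // suppress leading space
	for _, arg := range args {
		isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
		if wasString || isString {
			b = append(b, "%v"...)
		} else {
			b = append(b, " %v"...)
		}
		wasString = isString
	}
	return string(b)
}

func main() {
	args := []any{1, 2, "three", 4}
	fmt.Printf("%q\n", defaultFormat(args))       // "%v %v%v%v"
	fmt.Printf(defaultFormat(args)+"\n", args...) // 1 2three4
	fmt.Print(args...)                            // identical rendering
	fmt.Println()
}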
+func lnFormat(args []any) string { + if len(args) == 0 { + return "\n" } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } + + b := make([]byte, 0, len(args)*3) + for range args { + b = append(b, "%v "...) } - l.vmap[pc] = 0 - return 0 + b[len(b)-1] = '\n' // Replace the last space with a newline. + return string(b) } // Verbose is a boolean type that implements Infof (like Printf) etc. @@ -986,9 +356,13 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if glog.V(2) { glog.Info("log this") } +// // or +// // glog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -997,184 +371,222 @@ type Verbose bool // V is at most the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. + return VDepth(1, level) +} - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } +// VDepth acts as V but uses depth to determine which call frame to check vmodule for. +// VDepth(0, level) is the same as V(level). +func VDepth(depth int, level Level) Verbose { + return Verbose(verboseEnabled(depth+1, level)) +} - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...any) { + v.InfoDepth(1, args...) +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) } - return Verbose(false) } -// Info is equivalent to the global Info function, guarded by the value of v. +// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { +func (v Verbose) InfoDepthf(depth int, format string, args ...any) { if v { - logging.print(infoLog, args...) + logf(depth+1, logsink.Info, true, noStack, format, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { +func (v Verbose) Infoln(args ...any) { if v { - logging.println(infoLog, args...) + logf(1, logsink.Info, true, noStack, lnFormat(args), args...) 
} } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { +func (v Verbose) Infof(format string, args ...any) { if v { - logging.printf(infoLog, format, args...) + logf(1, logsink.Info, true, noStack, format, args...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) +func Info(args ...any) { + InfoDepth(1, args...) } -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) +// InfoDepth calls Info from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +func InfoDepth(depth int, args ...any) { + logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) +} + +// InfoDepthf acts as InfoDepth but with format string. +func InfoDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Info, false, noStack, format, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) +func Infoln(args ...any) { + logf(1, logsink.Info, false, noStack, lnFormat(args), args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) +func Infof(format string, args ...any) { + logf(1, logsink.Info, false, noStack, format, args...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) +func Warning(args ...any) { + WarningDepth(1, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) +func WarningDepth(depth int, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. +// WarningDepthf(0, "msg") is the same as Warningf("msg"). +func WarningDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, format, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) +func Warningln(args ...any) { + logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
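The two idioms from V's doc comment look like this in practice; the guarded form skips argument construction entirely when verbose logging is off (the -v setting and the expensive helper are illustrative):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func expensive() string { return "...pretend this is costly to build..." }

func main() {
	flag.Parse()
	_ = flag.Set("v", "2") // for the sketch; normally passed as -v=2
	defer glog.Flush()

	if glog.V(2) { // cheap guard: expensive() runs only when enabled
		glog.Info("details: ", expensive())
	}
	glog.V(2).Infof("fluent form: %d", 42) // shorter; args always evaluated
}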
-func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) +func Warningf(format string, args ...any) { + logf(1, logsink.Warning, false, noStack, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) +func Error(args ...any) { + ErrorDepth(1, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) +func ErrorDepth(depth int, args ...any) { + logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. +// ErrorDepthf(0, "msg") is the same as Errorf("msg"). +func ErrorDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Error, false, noStack, format, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) +func Errorln(args ...any) { + logf(1, logsink.Error, false, noStack, lnFormat(args), args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) +func Errorf(format string, args ...any) { + logf(1, logsink.Error, false, noStack, format, args...) +} + +func fatalf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, withStack, format, args...) + sinks.file.Flush() + + err := abortProcess() // Should not return. + + // Failed to abort the process using signals. Dump a stack trace and exit. + Errorf("abortProcess returned unexpectedly: %v", err) + sinks.file.Flush() + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + os.Exit(2) // Exit with the same code as the default SIGABRT handler. } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) +func Fatal(args ...any) { + FatalDepth(1, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) +func FatalDepth(depth int, args ...any) { + fatalf(depth+1, defaultFormat(args), args...) +} + +// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. +// FatalDepthf(0, "msg") is the same as Fatalf("msg"). +func FatalDepthf(depth int, format string, args ...any) { + fatalf(depth+1, format, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. 
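The Depth and Depthf variants exist so logging helpers can attribute entries to their caller's file and line rather than to the helper itself. For example (logErr is a hypothetical wrapper):

package main

import (
	"errors"
	"flag"

	"github.com/golang/glog"
)

// logErr wraps glog; depth 1 skips this frame, so the emitted file:line
// points at logErr's caller.
func logErr(err error) {
	glog.ErrorDepth(1, "operation failed: ", err)
}

func main() {
	flag.Parse()
	defer glog.Flush()
	logErr(errors.New("disk full"))
}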
-func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) +func Fatalln(args ...any) { + fatalf(1, lnFormat(args), args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) +func Fatalf(format string, args ...any) { + fatalf(1, format, args...) } -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 +func exitf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, noStack, format, args...) + sinks.file.Flush() + os.Exit(1) +} // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) +func Exit(args ...any) { + ExitDepth(1, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) +func ExitDepth(depth int, args ...any) { + exitf(depth+1, defaultFormat(args), args...) +} + +// ExitDepthf acts as Exitf but uses depth to determine which call frame to log. +// ExitDepthf(0, "msg") is the same as Exitf("msg"). +func ExitDepthf(depth int, format string, args ...any) { + exitf(depth+1, format, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) +func Exitln(args ...any) { + exitf(1, lnFormat(args), args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) +func Exitf(format string, args ...any) { + exitf(1, format, args...) } diff --git a/event-exporter/vendor/github.com/golang/glog/glog_file.go b/event-exporter/vendor/github.com/golang/glog/glog_file.go index 65075d281..e7d125c5a 100644 --- a/event-exporter/vendor/github.com/golang/glog/glog_file.go +++ b/event-exporter/vendor/github.com/golang/glog/glog_file.go @@ -1,6 +1,6 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// Go support for leveled logs, analogous to https://github.com/google/glog. // -// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2023 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,26 +19,34 @@ package glog import ( + "bufio" + "bytes" "errors" "flag" "fmt" + "io" "os" "os/user" "path/filepath" + "runtime" "strings" "sync" "time" -) -// MaxSize is the maximum size of a log file in bytes. 
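As exitf and fatalf above show, Exit and Fatal differ in exit path rather than severity: Exit flushes and calls os.Exit(1) with no stack dump, while Fatal appends a backtrace, tries to abort the process (SIGABRT where supported), and exits nonzero. A sketch of the common pattern (the config.yaml path is illustrative):

package main

import (
	"flag"
	"os"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	defer glog.Flush()

	if _, err := os.Open("config.yaml"); err != nil {
		// Exitf: FATAL-severity log, no goroutine dump, os.Exit(1).
		// Fatalf would additionally capture stacks and abort.
		glog.Exitf("cannot load config: %v", err)
	}
}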
-var MaxSize uint64 = 1024 * 1024 * 1800 + "github.com/golang/glog/internal/logsink" +) // logDirs lists the candidate directories for new log files. var logDirs []string -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") +var ( + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") + logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ + " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") +) func createLogDirs() { if *logDir != "" { @@ -64,9 +72,17 @@ func init() { if err == nil { userName = current.Username } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) + // Sanitize userName since it is used to construct file paths. + userName = strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r >= '0' && r <= '9': + default: + return '_' + } + return r + }, userName) } // shortHostname returns its argument, truncating at the first period. @@ -116,9 +132,282 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err os.Symlink(name, symlink) // ignore err + if *logLink != "" { + lsymlink := filepath.Join(*logLink, link) + os.Remove(lsymlink) // ignore err + os.Symlink(fname, lsymlink) // ignore err + } return f, fname, nil } lastErr = err } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer + filenames() []string +} + +var sinks struct { + stderr stderrSink + file fileSink +} + +func init() { + // Register stderr first: that way if we crash during file-writing at least + // the log will have gone somewhere. + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + + sinks.file.flushChan = make(chan logsink.Severity, 1) + go sinks.file.flushDaemon() +} + +// stderrSink is a logsink.Text that writes log entries to stderr +// if they meet certain conditions. +type stderrSink struct { + mu sync.Mutex + w io.Writer // if nil Emit uses os.Stderr directly +} + +// Enabled implements logsink.Text.Enabled. It returns true if any of the +// various stderr flags are enabled for logs of the given severity, if the log +// message is from the standard "log" package, or if google.Init has not yet run +// (and hence file logging is not yet initialized). +func (s *stderrSink) Enabled(m *logsink.Meta) bool { + return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get() +} + +// Emit implements logsink.Text.Emit. 
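Because logsink lives under internal/, user code cannot register additional sinks; the sketch below only illustrates the two-phase Enabled/Emit design that stderrSink here and fileSink below both follow (every type in it is a stand-in, not glog API):

package main

import (
	"os"
	"sync"
)

// meta stands in for the internal logsink.Meta.
type meta struct{ severity int }

// textSink mirrors the split above: Enabled is a cheap filter consulted
// first; Emit performs the actual write.
type textSink interface {
	Enabled(*meta) bool
	Emit(*meta, []byte) (int, error)
}

type stderrSink struct {
	mu        sync.Mutex
	threshold int
}

func (s *stderrSink) Enabled(m *meta) bool { return m.severity >= s.threshold }

func (s *stderrSink) Emit(m *meta, data []byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return os.Stderr.Write(data)
}

func main() {
	sinks := []textSink{&stderrSink{threshold: 2}}
	m := &meta{severity: 3} // e.g. ERROR
	for _, s := range sinks {
		if s.Enabled(m) {
			s.Emit(m, []byte("an ERROR-level line\n"))
		}
	}
}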
+func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + w := s.w + if w == nil { + w = os.Stderr + } + dn, err := w.Write(data) + n += dn + return n, err +} + +// severityWriters is an array of flushSyncWriter with a value for each +// logsink.Severity. +type severityWriters [4]flushSyncWriter + +// fileSink is a logsink.Text that prints to a set of Google log files. +type fileSink struct { + mu sync.Mutex + // file holds writer for each of the log types. + file severityWriters + flushChan chan logsink.Severity +} + +// Enabled implements logsink.Text.Enabled. It returns true if google.Init +// has run and both --disable_log_to_disk and --logtostderr are false. +func (s *fileSink) Enabled(m *logsink.Meta) bool { + return !toStderr +} + +// Emit implements logsink.Text.Emit +func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + if err = s.createMissingFiles(m.Severity); err != nil { + return 0, err + } + for sev := m.Severity; sev >= logsink.Info; sev-- { + if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil { + err = fErr // Take the first error. + } + } + n = len(data) + if int(m.Severity) > *logBufLevel { + select { + case s.flushChan <- m.Severity: + default: + } + } + + return n, err +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// s.mu is held for all its methods. +type syncBuffer struct { + sink *fileSink + *bufio.Writer + file *os.File + names []string + sev logsink.Severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + return 0, err + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + return n, err +} + +func (sb *syncBuffer) filenames() []string { + return sb.names +} + +const footer = "\nCONTINUED IN NEXT FILE\n" + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + var err error + pn := "" + file, name, err := create(sb.sev.String(), now) + + if sb.file != nil { + // The current log file becomes the previous log at the end of + // this block, so save its name for use in the header of the next + // file. + pn = sb.file.Name() + sb.Flush() + // If there's an existing file, write a footer with the name of + // the next file in the chain, followed by the constant string + // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. + sb.file.Write([]byte("Next log: ")) + sb.file.Write([]byte(name)) + sb.file.Write([]byte(footer)) + sb.file.Close() + } + + sb.file = file + sb.names = append(sb.names, name) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. 
+ var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Previous log: %s\n", pn) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createMissingFiles creates all the log files for severity from infoLog up to +// upTo that have not already been created. +// s.mu is held. +func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { + if s.file[upTo] != nil { + return nil + } + now := time.Now() + // Files are created in increasing severity order, so we can be assured that + // if a high severity logfile exists, then so do all of lower severity. + for sev := logsink.Info; sev <= upTo; sev++ { + if s.file[sev] != nil { + continue + } + sb := &syncBuffer{ + sink: s, + sev: sev, + } + if err := sb.rotateFile(now); err != nil { + return err + } + s.file[sev] = sb + } + return nil +} + +// flushDaemon periodically flushes the log file buffers. +func (s *fileSink) flushDaemon() { + tick := time.NewTicker(30 * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + s.Flush() + case sev := <-s.flushChan: + s.flush(sev) + } + } +} + +// Flush flushes all pending log I/O. +func Flush() { + sinks.file.Flush() +} + +// Flush flushes all the logs and attempts to "sync" their data to disk. +func (s *fileSink) Flush() error { + return s.flush(logsink.Info) +} + +// flush flushes all logs of severity threshold or greater. +func (s *fileSink) flush(threshold logsink.Severity) error { + s.mu.Lock() + defer s.mu.Unlock() + + var firstErr error + updateErr := func(err error) { + if err != nil && firstErr == nil { + firstErr = err + } + } + + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + file := s.file[sev] + if file != nil { + updateErr(file.Flush()) + updateErr(file.Sync()) + } + } + + return firstErr +} + +// Names returns the names of the log files holding the FATAL, ERROR, +// WARNING, or INFO logs. Returns ErrNoLog if the log for the given +// level doesn't exist (e.g. because no messages of that level have been +// written). This may return multiple names if the log type requested +// has rolled over. +func Names(s string) ([]string, error) { + severity, err := logsink.ParseSeverity(s) + if err != nil { + return nil, err + } + + sinks.file.mu.Lock() + defer sinks.file.mu.Unlock() + f := sinks.file.file[severity] + if f == nil { + return nil, ErrNoLog + } + + return f.filenames(), nil +} diff --git a/event-exporter/vendor/github.com/golang/glog/glog_file_linux.go b/event-exporter/vendor/github.com/golang/glog/glog_file_linux.go new file mode 100644 index 000000000..d795092d0 --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/glog_file_linux.go @@ -0,0 +1,39 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +package glog + +import ( + "errors" + "runtime" + "syscall" +) + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (like stderr). +// +// It does this by sending SIGABRT to the current thread. +// +// If successful, abortProcess does not return. +func abortProcess() error { + runtime.LockOSThread() + if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil { + return err + } + return errors.New("log: killed current thread with SIGABRT, but still running") +} diff --git a/event-exporter/vendor/github.com/golang/glog/glog_file_other.go b/event-exporter/vendor/github.com/golang/glog/glog_file_other.go new file mode 100644 index 000000000..9540f14fc --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/glog_file_other.go @@ -0,0 +1,30 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !(unix || windows) + +package glog + +import ( + "fmt" + "runtime" +) + +// abortProcess returns an error on platforms that presumably don't support signals. +func abortProcess() error { + return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH) + +} diff --git a/event-exporter/vendor/github.com/golang/glog/glog_file_posix.go b/event-exporter/vendor/github.com/golang/glog/glog_file_posix.go new file mode 100644 index 000000000..c27c7c0e4 --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/glog_file_posix.go @@ -0,0 +1,53 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
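Stepping back to fileSink.flushDaemon above: it multiplexes a 30-second ticker with the buffered flushChan that Emit pokes via a non-blocking send whenever a line at or above -logbuflevel lands. That select loop in isolation (names and timings are illustrative):

package main

import (
	"fmt"
	"time"
)

func flushDaemon(flushChan <-chan string, flush func(reason string)) {
	tick := time.NewTicker(30 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			flush("periodic")
		case reason := <-flushChan:
			flush(reason)
		}
	}
}

func main() {
	ch := make(chan string, 1) // capacity 1: senders never block
	go flushDaemon(ch, func(reason string) { fmt.Println("flush:", reason) })

	select {
	case ch <- "high-severity line": // mirror Emit's non-blocking send
	default: // a flush is already pending; drop the hint
	}
	time.Sleep(100 * time.Millisecond) // give the daemon time to run
}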
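The POSIX abortProcess that follows signals the whole process and then parks the goroutine while the SIGABRT handler terminates it. A tamer demonstration of the same os.FindProcess/Signal sequence, using SIGUSR1 so the program survives to report delivery (unix-only, purely illustrative):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR1)

	p, err := os.FindProcess(os.Getpid())
	if err != nil {
		panic(err)
	}
	if err := p.Signal(syscall.SIGUSR1); err != nil {
		panic(err)
	}

	fmt.Println("received:", <-ch) // with SIGABRT we would never get here
}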
+ +//go:build (unix || windows) && !linux + +package glog + +import ( + "os" + "syscall" + "time" +) + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (like stderr). +// +// It does this by sending SIGABRT to the current process. Unfortunately, the +// signal may or may not be delivered to the current thread; in order to do that +// portably, we would need to add a cgo dependency and call pthread_kill. +// +// If successful, abortProcess does not return. +func abortProcess() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + if err := p.Signal(syscall.SIGABRT); err != nil { + return err + } + + // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to + // run (and eventually terminate the process themselves). + // + // We could just "select{}" here, but there's an outside chance that would + // trigger the runtime's deadlock detector if there happen not to be any + // background goroutines running. So we'll sleep a while first to give + // the signal some time. + time.Sleep(10 * time.Second) + select {} +} diff --git a/event-exporter/vendor/github.com/golang/glog/glog_flags.go b/event-exporter/vendor/github.com/golang/glog/glog_flags.go new file mode 100644 index 000000000..fa4371afd --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/glog_flags.go @@ -0,0 +1,398 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog/internal/logsink" +) + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + full bool // The pattern wants to match the full path + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(full, file string) bool { + if m.literal { + if m.full { + return full == m.pattern + } + return file == m.pattern + } + if m.full { + match, _ := filepath.Match(m.pattern, full) + return match + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// isFull reports whether the pattern matches the full file path, that is, +// whether it contains /. +func isFull(pattern string) bool { + return strings.ContainsRune(pattern, '/') +} + +// verboseFlags represents the setting of the -v and -vmodule flags. 
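modulePat.match at the end of this hunk splits on two axes: literal patterns use plain string comparison, glob patterns go through filepath.Match, and patterns containing a slash are tested against the full path instead of the basename. The same decision logic as a standalone sketch:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// matchModule mimics isLiteral/isFull plus modulePat.match: the ".go" suffix
// is trimmed first, slash-bearing patterns target the full path, and
// metacharacter-free patterns skip filepath.Match entirely.
func matchModule(pattern, path string) bool {
	full := strings.TrimSuffix(path, ".go")
	base := full
	if i := strings.LastIndex(full, "/"); i >= 0 {
		base = full[i+1:]
	}
	target := base
	if strings.ContainsRune(pattern, '/') {
		target = full
	}
	if !strings.ContainsAny(pattern, `\*?[]`) {
		return target == pattern // literal: plain comparison
	}
	ok, _ := filepath.Match(pattern, target)
	return ok
}

func main() {
	fmt.Println(matchModule("recordio", "a/recordio.go"))     // true
	fmt.Println(matchModule("gfs*", "a/b/gfs_client.go"))     // true
	fmt.Println(matchModule("a/b/gfs*", "a/b/gfs_client.go")) // true
	fmt.Println(matchModule("recordio", "a/other.go"))        // false
}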
+type verboseFlags struct { + // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() + // call site, identified by PC. If there is no matching -vmodule filter, + // the cached value is exactly v. moduleLevelCache is replaced with a new + // Map whenever the -vmodule or -v flag changes state. + moduleLevelCache atomic.Value + + // mu guards all fields below. + mu sync.Mutex + + // v stores the value of the -v flag. It may be read safely using + // sync.LoadInt32, but is only modified under mu. + v Level + + // module stores the parsed -vmodule flag. + module []modulePat + + // moduleLength caches len(module). If greater than zero, it + // means vmodule is enabled. It may be read safely using sync.LoadInt32, but + // is only modified under mu. + moduleLength int32 +} + +// NOTE: For compatibility with the open-sourced v1 version of this +// package (github.com/golang/glog) we need to retain that flag.Level +// implements the flag.Value interface. See also go/log-vs-glog. + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(l.Get().(Level)), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() any { + if l == &vflags.v { + // l is the value registered for the -v flag. + return Level(atomic.LoadInt32((*int32)(l))) + } + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + if l == &vflags.v { + // l is the value registered for the -v flag. + vflags.mu.Lock() + defer vflags.mu.Unlock() + vflags.moduleLevelCache.Store(&sync.Map{}) + atomic.StoreInt32((*int32)(l), int32(v)) + return nil + } + *l = Level(v) + return nil +} + +// vModuleFlag is the flag.Value for the --vmodule flag. +type vModuleFlag struct{ *verboseFlags } + +func (f vModuleFlag) String() string { + // Do not panic on the zero value. + // https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ. + if f.verboseFlags == nil { + return "" + } + f.mu.Lock() + defer f.mu.Unlock() + + var b bytes.Buffer + for i, f := range f.module { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get returns nil for this flag type since the struct is not exported. +func (f vModuleFlag) Get() any { return nil } + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 +func (f vModuleFlag) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + // TODO: check syntax of filter? 
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) + } + + f.mu.Lock() + defer f.mu.Unlock() + f.module = filter + atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) + f.moduleLevelCache.Store(&sync.Map{}) + return nil +} + +func (f *verboseFlags) levelForPC(pc uintptr) Level { + if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { + return level.(Level) + } + + f.mu.Lock() + defer f.mu.Unlock() + level := Level(f.v) + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d for + // regular matches, /a/b/c/d for full matches. + file = strings.TrimSuffix(file, ".go") + full := file + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range f.module { + if filter.match(full, file) { + level = filter.level + break // Use the first matching level. + } + } + f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) + return level +} + +func (f *verboseFlags) enabled(callerDepth int, level Level) bool { + if atomic.LoadInt32(&f.moduleLength) == 0 { + // No vmodule values specified, so compare against v level. + return Level(atomic.LoadInt32((*int32)(&f.v))) >= level + } + + pcs := [1]uintptr{} + if runtime.Callers(callerDepth+2, pcs[:]) < 1 { + return false + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + return f.levelForPC(frame.Entry) >= level +} + +// traceLocation represents an entry in the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +func parseTraceLocation(value string) (traceLocation, error) { + fields := strings.Split(value, ":") + if len(fields) != 2 { + return traceLocation{}, errTraceSyntax + } + file, lineStr := fields[0], fields[1] + if !strings.Contains(file, ".") { + return traceLocation{}, errTraceSyntax + } + line, err := strconv.Atoi(lineStr) + if err != nil { + return traceLocation{}, errTraceSyntax + } + if line < 0 { + return traceLocation{}, errors.New("negative value for line") + } + return traceLocation{file, line}, nil +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +func (t traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t traceLocation) String() string { + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// traceLocations represents the -log_backtrace_at flag. +// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 +// Note that unlike vmodule the file extension is included here. +type traceLocations struct { + mu sync.Mutex + locsLen int32 // Safe for atomic read without mu. 
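+	// locs holds the parsed locations; guarded by mu.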
+ locs []traceLocation +} + +func (t *traceLocations) String() string { + t.mu.Lock() + defer t.mu.Unlock() + + var buf bytes.Buffer + for i, tl := range t.locs { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(tl.String()) + } + return buf.String() +} + +// Get always returns nil for this flag type since the struct is not exported +func (t *traceLocations) Get() any { return nil } + +func (t *traceLocations) Set(value string) error { + var locs []traceLocation + for _, s := range strings.Split(value, ",") { + if s == "" { + continue + } + loc, err := parseTraceLocation(s) + if err != nil { + return err + } + locs = append(locs, loc) + } + + t.mu.Lock() + defer t.mu.Unlock() + atomic.StoreInt32(&t.locsLen, int32(len(locs))) + t.locs = locs + return nil +} + +func (t *traceLocations) match(file string, line int) bool { + if atomic.LoadInt32(&t.locsLen) == 0 { + return false + } + + t.mu.Lock() + defer t.mu.Unlock() + for _, tl := range t.locs { + if tl.match(file, line) { + return true + } + } + return false +} + +// severityFlag is an atomic flag.Value implementation for logsink.Severity. +type severityFlag int32 + +func (s *severityFlag) get() logsink.Severity { + return logsink.Severity(atomic.LoadInt32((*int32)(s))) +} +func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } +func (s *severityFlag) Get() any { return s.get() } +func (s *severityFlag) Set(value string) error { + threshold, err := logsink.ParseSeverity(value) + if err != nil { + // Not a severity name. Try a raw number. + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = logsink.Severity(v) + if threshold < logsink.Info || threshold > logsink.Fatal { + return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) + } + } + atomic.StoreInt32((*int32)(s), int32(threshold)) + return nil +} + +var ( + vflags verboseFlags // The -v and -vmodule flags. + + logBacktraceAt traceLocations // The -log_backtrace_at flag. + + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + stderrThreshold severityFlag // The -stderrthreshold flag. +) + +// verboseEnabled returns whether the caller at the given depth should emit +// verbose logs at the given level, with depth 0 identifying the caller of +// verboseEnabled. +func verboseEnabled(callerDepth int, level Level) bool { + return vflags.enabled(callerDepth+1, level) +} + +// backtraceAt returns whether the logging call at the given function and line +// should also emit a backtrace of the current call stack. 
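+// The file argument is a full path; match compares only its basename against
+// the flag entries.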
+func backtraceAt(file string, line int) bool { + return logBacktraceAt.match(file, line) +} + +func init() { + vflags.moduleLevelCache.Store(&sync.Map{}) + + flag.Var(&vflags.v, "v", "log level for V logs") + flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + + flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + stderrThreshold = severityFlag(logsink.Error) + + flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") +} diff --git a/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink.go b/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink.go new file mode 100644 index 000000000..53758e1c9 --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink.go @@ -0,0 +1,387 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logsink + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/stackdump" +) + +// MaxLogMessageLen is the limit on length of a formatted log message, including +// the standard line prefix and trailing newline. +// +// Chosen to match C++ glog. +const MaxLogMessageLen = 15000 + +// A Severity is a severity at which a message can be logged. +type Severity int8 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + Info Severity = iota + Warning + Error + + // Fatal contains logs written immediately before the process terminates. + // + // Sink implementations should not terminate the process themselves: the log + // package will perform any necessary cleanup and terminate the process as + // appropriate. + Fatal +) + +func (s Severity) String() string { + switch s { + case Info: + return "INFO" + case Warning: + return "WARNING" + case Error: + return "ERROR" + case Fatal: + return "FATAL" + } + return fmt.Sprintf("%T(%d)", s, s) +} + +// ParseSeverity returns the case-insensitive Severity value for the given string. +func ParseSeverity(name string) (Severity, error) { + name = strings.ToUpper(name) + for s := Info; s <= Fatal; s++ { + if s.String() == name { + return s, nil + } + } + return -1, fmt.Errorf("logsink: invalid severity %q", name) +} + +// Meta is metadata about a logging call. +type Meta struct { + // Time is the time at which the log call was made. + Time time.Time + + // File is the source file from which the log entry originates. + File string + // Line is the line offset within the source file. + Line int + // Depth is the number of stack frames between the logsink and the log call. 
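+	// (logsink.Printf increments Depth before dispatching to sinks.)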
+	Depth int
+
+	Severity Severity
+
+	// Verbose indicates whether the call was made via "log.V". Log entries below
+	// the current verbosity threshold are not sent to the sink.
+	Verbose bool
+
+	// Thread ID. This can be populated with a thread ID from another source,
+	// such as a system we are importing logs from. In the normal case, this
+	// will be set to the process ID (PID), since Go doesn't have threads.
+	Thread int64
+
+	// Stack trace starting in the logging function. May be nil.
+	// A logsink should implement the StackWanter interface to request this.
+	//
+	// Even if WantStack returns false, this field may be set (e.g. if another
+	// sink wants a stack trace).
+	Stack *stackdump.Stack
+}
+
+// Structured is a logging destination that accepts structured data as input.
+type Structured interface {
+	// Printf formats according to a fmt.Printf format specifier and writes a log
+	// entry. The precise result of formatting depends on the sink, but should
+	// aim for consistency with fmt.Printf.
+	//
+	// Printf returns the number of bytes occupied by the log entry, which
+	// may not be equal to the total number of bytes written.
+	//
+	// Printf returns any error encountered *if* it is severe enough that the log
+	// package should terminate the process.
+	//
+	// The sink must not modify the *Meta parameter, nor reference it after
+	// Printf has returned: it may be reused in subsequent calls.
+	Printf(meta *Meta, format string, a ...any) (n int, err error)
+}
+
+// StackWanter can be implemented by a logsink.Structured to indicate that it
+// wants a stack trace to accompany at least some of the log messages it receives.
+type StackWanter interface {
+	// WantStack returns true if the sink requires a stack trace for a log message
+	// with this metadata.
+	//
+	// NOTE: Returning true implies that meta.Stack will be non-nil. Returning
+	// false does NOT imply that meta.Stack will be nil.
+	WantStack(meta *Meta) bool
+}
+
+// Text is a logging destination that accepts pre-formatted log lines (instead of
+// structured data).
+type Text interface {
+	// Enabled returns whether this sink should output messages for the given
+	// Meta. If the sink returns false for a given Meta, the Printf function will
+	// not call Emit on it for the corresponding log message.
+	Enabled(*Meta) bool
+
+	// Emit writes a pre-formatted text log entry (including any applicable
+	// header) to the log. It returns the number of bytes occupied by the entry
+	// (which may differ from the length of the passed-in slice).
+	//
+	// Emit returns any error encountered *if* it is severe enough that the log
+	// package should terminate the process.
+	//
+	// The sink must not modify the *Meta parameter, nor reference it after
+	// Printf has returned: it may be reused in subsequent calls.
+	//
+	// NOTE: When developing a text sink, keep in mind the surface in which the
+	// logs will be displayed, and whether it's important that the sink be
+	// resistant to tampering in the style of b/211428300. Standard text sinks
+	// (like `stderrSink`) do not protect against this (e.g. by escaping
+	// characters) because the cases where they would show user-influenced bytes
+	// are vanishingly small.
+	Emit(*Meta, []byte) (n int, err error)
+}
+
+// bufs is a pool of *bytes.Buffer used in formatting log entries.
+var bufs sync.Pool // Pool of *bytes.Buffer.
+
+// textPrintf formats a text log entry and emits it to all specified Text sinks.
+//
+// The returned n is the maximum across all Emit calls.
+// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { + // We expect at most file, stderr, and perhaps syslog. If there are more, + // we'll end up allocating - no big deal. + const maxExpectedTextSinks = 3 + var noAllocSinks [maxExpectedTextSinks]Text + + sinks := noAllocSinks[:0] + for _, s := range textSinks { + if s.Enabled(m) { + sinks = append(sinks, s) + } + } + if len(sinks) == 0 && m.Severity != Fatal { + return 0, nil // No TextSinks specified; don't bother formatting. + } + + bufi := bufs.Get() + var buf *bytes.Buffer + if bufi == nil { + buf = bytes.NewBuffer(nil) + bufi = buf + } else { + buf = bufi.(*bytes.Buffer) + buf.Reset() + } + + // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] + // + // The "PID" entry arguably ought to be TID for consistency with other + // environments, but TID is not meaningful in a Go program due to the + // multiplexing of goroutines across threads. + // + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + const severityChar = "IWEF" + buf.WriteByte(severityChar[m.Severity]) + + _, month, day := m.Time.Date() + hour, minute, second := m.Time.Clock() + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') + buf.WriteByte(' ') + + nDigits(buf, 7, uint64(m.Thread), ' ') + buf.WriteByte(' ') + + { + file := m.File + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + buf.WriteString(file) + } + + buf.WriteByte(':') + { + var tmp [19]byte + buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) + } + buf.WriteString("] ") + + msgStart := buf.Len() + fmt.Fprintf(buf, format, args...) + if buf.Len() > MaxLogMessageLen-1 { + buf.Truncate(MaxLogMessageLen - 1) + } + msgEnd := buf.Len() + if b := buf.Bytes(); b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + + for _, s := range sinks { + sn, sErr := s.Emit(m, buf.Bytes()) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + + if m.Severity == Fatal { + savedM := *m + fatalMessageStore(savedEntry{ + meta: &savedM, + msg: buf.Bytes()[msgStart:msgEnd], + }) + } else { + bufs.Put(bufi) + } + return n, err +} + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer to buf. +func twoDigits(buf *bytes.Buffer, d int) { + buf.WriteByte(digits[(d/10)%10]) + buf.WriteByte(digits[d%10]) +} + +// nDigits formats an n-digit integer to buf, padding with pad on the left. It +// assumes d != 0. +func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { + var tmp [20]byte + + cutoff := len(tmp) - n + j := len(tmp) - 1 + for ; d > 0; j-- { + tmp[j] = digits[d%10] + d /= 10 + } + for ; j >= cutoff; j-- { + tmp[j] = pad + } + j++ + buf.Write(tmp[j:]) +} + +// Printf writes a log entry to all registered TextSinks in this package, then +// to all registered StructuredSinks. +// +// The returned n is the maximum across all Emit and Printf calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). 
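+// Printf increments m.Depth to account for its own stack frame before
+// dispatching to the sinks.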
+func Printf(m *Meta, format string, args ...any) (n int, err error) {
+	m.Depth++
+	n, err = textPrintf(m, TextSinks, format, args...)
+
+	for _, sink := range StructuredSinks {
+		// TODO: Support TextSinks that implement StackWanter?
+		if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
+			if m.Stack == nil {
+				// First, try to find a stacktrace in args, otherwise generate one.
+				for _, arg := range args {
+					if stack, ok := arg.(stackdump.Stack); ok {
+						m.Stack = &stack
+						break
+					}
+				}
+				if m.Stack == nil {
+					stack := stackdump.Caller( /* skipDepth = */ m.Depth)
+					m.Stack = &stack
+				}
+			}
+		}
+		sn, sErr := sink.Printf(m, format, args...)
+		if sn > n {
+			n = sn
+		}
+		if sErr != nil && err == nil {
+			err = sErr
+		}
+	}
+	return n, err
+}
+
+// The sets of sinks to which logs should be written.
+//
+// These must only be modified during package init, and are read-only thereafter.
+var (
+	// StructuredSinks is the set of Structured sink instances to which logs
+	// should be written.
+	StructuredSinks []Structured
+
+	// TextSinks is the set of Text sink instances to which logs should be
+	// written.
+	//
+	// These are registered separately from Structured sink implementations to
+	// avoid the need to repeat the work of formatting a message for each Text
+	// sink that writes it. The package-level Printf function writes to both sets
+	// independently, so a given log destination should only register a Structured
+	// *or* a Text sink (not both).
+	TextSinks []Text
+)
+
+type savedEntry struct {
+	meta *Meta
+	msg  []byte
+}
+
+// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
+//
+// The purpose of this sink is to allow applications to intercept logging calls before they are
+// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
+// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
+// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
+// StructuredTextWrapper instance. This is how one could set this up in their application:
+//
+//	func init() {
+//
+//		wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
+//		// sanitizersink will intercept logs and remove PII
+//		sanitizer := sanitizersink{Sink: &wrapper}
+//		logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
+//		logsink.TextSinks = nil
+//
+//	}
+type StructuredTextWrapper struct {
+	// TextSinks is the set of Text sinks that should receive logs from this
+	// StructuredTextWrapper instance.
+	TextSinks []Text
+}
+
+// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
+func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
+	return textPrintf(meta, w.TextSinks, format, args...)
+}
diff --git a/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
new file mode 100644
index 000000000..3dc269abc
--- /dev/null
+++ b/event-exporter/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
@@ -0,0 +1,35 @@
+package logsink
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func fatalMessageStore(e savedEntry) {
+	// Only put a new one in if we haven't assigned before.
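+	// The compare-and-swap is a no-op when fatalMessage is already non-nil,
+	// so only the first fatal message is retained.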
+ atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) +} + +var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer + +// FatalMessage returns the Meta and message contents of the first message +// logged with Fatal severity, or false if none has occurred. +func FatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} + +// DoNotUseRacyFatalMessage is FatalMessage, but worse. +// +//go:norace +//go:nosplit +func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(fatalMessage) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} diff --git a/event-exporter/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/event-exporter/vendor/github.com/golang/glog/internal/stackdump/stackdump.go new file mode 100644 index 000000000..3427c9d6b --- /dev/null +++ b/event-exporter/vendor/github.com/golang/glog/internal/stackdump/stackdump.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdump provides wrappers for runtime.Stack and runtime.Callers +// with uniform support for skipping caller frames. +// +// ⚠ Unlike the functions in the runtime package, these may allocate a +// non-trivial quantity of memory: use them with care. ⚠ +package stackdump + +import ( + "bytes" + "runtime" +) + +// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to +// runtime.Stack itself or 0 if it does not. +// +// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc +// compiler does not. +var runtimeStackSelfFrames = func() int { + for n := 1 << 10; n < 1<<20; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if bytes.Contains(buf[:n], []byte("runtime.Stack")) { + return 1 + } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { + return 0 + } + } + return 0 +}() + +// Stack is a stack dump for a single goroutine. +type Stack struct { + // Text is a representation of the stack dump in a human-readable format. + Text []byte + + // PC is a representation of the stack dump using raw program counter values. + PC []uintptr +} + +func (s Stack) String() string { return string(s.Text) } + +// Caller returns the Stack dump for the calling goroutine, starting skipDepth +// frames before the caller of Caller. (Caller(0) provides a dump starting at +// the caller of this function.) +func Caller(skipDepth int) Stack { + return Stack{ + Text: CallerText(skipDepth + 1), + PC: CallerPC(skipDepth + 1), + } +} + +// CallerText returns a textual dump of the stack starting skipDepth frames before +// the caller. (CallerText(0) provides a dump starting at the caller of this +// function.) 
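+// The returned dump keeps the goroutine header line emitted by runtime.Stack.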
+func CallerText(skipDepth int) []byte { + for n := 1 << 10; ; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if n < len(buf) { + return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) + } + } +} + +// CallerPC returns a dump of the program counters of the stack starting +// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at +// the caller of this function.) +func CallerPC(skipDepth int) []uintptr { + for n := 1 << 8; ; n *= 2 { + buf := make([]uintptr, n) + n := runtime.Callers(skipDepth+2, buf) + if n < len(buf) { + return buf[:n] + } + } +} + +// pruneFrames removes the topmost skipDepth frames of the first goroutine in a +// textual stack dump. It overwrites the passed-in slice. +// +// If there are fewer than skipDepth frames in the first goroutine's stack, +// pruneFrames prunes it to an empty stack and leaves the remaining contents +// intact. +func pruneFrames(skipDepth int, stack []byte) []byte { + headerLen := 0 + for i, c := range stack { + if c == '\n' { + headerLen = i + 1 + break + } + } + if headerLen == 0 { + return stack // No header line - not a well-formed stack trace. + } + + skipLen := headerLen + skipNewlines := skipDepth * 2 + for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { + c := stack[skipLen] + if c != '\n' { + continue + } + skipNewlines-- + skipLen++ + if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { + break + } + } + + pruned := stack[skipLen-headerLen:] + copy(pruned, stack[:headerLen]) + return pruned +} diff --git a/event-exporter/vendor/github.com/golang/protobuf/jsonpb/decode.go b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..6c16c255f --- /dev/null +++ b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. 
Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
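+		// DiscardUnknown mirrors AllowUnknownFields; the resolver is only
+		// overridden when the caller supplied a custom AnyResolver.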
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
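+		// The key arrives bracketed, e.g. "[pkg.some_extension]"; strip the
+		// brackets to recover the full name before consulting the registry.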
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/event-exporter/vendor/github.com/golang/protobuf/jsonpb/encode.go b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. 
+ // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
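+	// secondInNanos (1e9) bounds the nanoseconds field in the Duration and
+	// Timestamp cases below.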
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
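+		// Sorting by field number keeps the emitted JSON deterministic.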
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
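+		// e.g. "pkg.Msg.message_set_extension" is written as "[pkg.Msg]".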
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/event-exporter/vendor/github.com/golang/protobuf/jsonpb/json.go b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/event-exporter/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/compare.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/compare.go index 2a5446762..0f5b8a48c 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/compare.go @@ -5,7 +5,7 @@ // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. +// [reflect.DeepEqual] for comparing whether two values are semantically equal. 
// It is intended to only be used in tests, as performance is not a goal and // it may panic if it cannot compare the values. Its propensity towards // panicking means that it is unsuitable for production environments where a @@ -13,21 +13,22 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method +// to determine equality. This allows package authors to determine +// the equality operation for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an [Ignore] option +// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) +// or explicitly compared using the [Exporter] option. package cmp import ( @@ -40,45 +41,48 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that +// remain after applying all path filters, value filters, and type filters. +// If at least one [Ignore] exists in S, then the comparison is ignored. +// If the number of [Transformer] and [Comparer] options in S is greater than one, +// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single [Comparer], then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option -// explicitly permits comparing the unexported field. +// If a struct contains unexported fields, Equal panics unless an [Ignore] option +// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field +// or the [Exporter] option explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. // Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Maps are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// To use custom comparisons for map keys, consider using +// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. // Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively @@ -142,7 +146,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -637,7 +641,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. 
// // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/export.go similarity index 94% rename from event-exporter/vendor/github.com/google/go-cmp/cmp/export_unsafe.go rename to event-exporter/vendor/github.com/google/go-cmp/cmp/export.go index e2c0f74e8..29f82fe6b 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/export.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego -// +build !purego - package cmp import ( @@ -12,8 +9,6 @@ import ( "unsafe" ) -const supportExporters = true - // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/export_panic.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/export_panic.go deleted file mode 100644 index ae851fe53..000000000 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -package cmp - -import "reflect" - -const supportExporters = false - -func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { - panic("no support for forcibly accessing unexported fields") -} diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16c..a248e5436 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. 
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is to connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. + // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go similarity index 95% rename from event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go rename to event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go index 16e6860af..e5dfff69a 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file.
-//go:build !purego -// +build !purego - package value import ( diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go deleted file mode 100644 index 1a71bfcbd..000000000 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -package value - -import "reflect" - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p uintptr - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // NOTE: Storing a pointer as an uintptr is technically incorrect as it - // assumes that the GC implementation does not use a moving collector. - return Pointer{v.Pointer(), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == 0 -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return p.p -} diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a2997..000000000 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. -func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/options.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb53..754496f3b 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/options.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/options.go @@ -13,15 +13,15 @@ import ( "github.com/google/go-cmp/cmp/internal/function" ) -// Option configures for specific behavior of Equal and Diff. 
In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), +// Option configures for specific behavior of [Equal] and [Diff]. In particular, +// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), // configure how equality is determined. // -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. +// The fundamental options may be composed with filters ([FilterPath] and +// [FilterValues]) to control the scope over which they are applied. // -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. +// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions +// for creating options that may be used with [Equal] and [Diff]. type Option interface { // filter applies all filters and returns the option that remains. // Each option may only read s.curPath and call s.callTTBFunc. @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -54,9 +56,9 @@ type core struct{} func (core) isCore() {} -// Options is a list of Option values that also satisfies the Option interface. +// Options is a list of [Option] values that also satisfies the [Option] interface. // Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, +// [Option] values into a single [Option]. When this package processes an Options, // it will be implicitly expanded into a flat list. // // Applying a filter on an Options is equivalent to applying that same filter @@ -103,16 +105,16 @@ func (opts Options) String() string { return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) } -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. +// FilterPath returns a new [Option] where opt is only evaluated if filter f +// returns true for the current [Path] in the value tree. // // This filter is called even if a slice element or map entry is missing and // provides an opportunity to ignore such cases. The filter function must be // symmetric such that the filter result is identical regardless of whether the // missing value is from x or y. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterPath(f func(Path) bool, opt Option) Option { if f == nil { panic("invalid path filter function") @@ -140,7 +142,7 @@ func (f pathFilter) String() string { return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } -// FilterValues returns a new Option where opt is only evaluated if filter f, +// FilterValues returns a new [Option] where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the // current pair of values being compared. 
If either value is invalid or // the type of the values is not assignable to T, then this filter implicitly @@ -152,8 +154,8 @@ func (f pathFilter) String() string { // If T is an interface, it is possible that f is called with two values with // different concrete types that both implement T. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterValues(f interface{}, opt Option) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { @@ -190,9 +192,9 @@ func (f valuesFilter) String() string { return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. +// Ignore is an [Option] that causes all comparisons to be ignored. +// This value is intended to be combined with [FilterPath] or [FilterValues]. +// It is an error to pass an unfiltered Ignore option to [Equal]. func Ignore() Option { return ignore{} } type ignore struct{ core } @@ -232,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" + } else if t.Comparable() { + help = "consider using cmpopts.EquateComparable to compare comparable Go types" } } else { // Unnamed type with unexported fields. Derive PkgPath from field. @@ -252,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) -// Transformer returns an Option that applies a transformation function that +// Transformer returns an [Option] that applies a transformation function that // converts values of a certain type into that of another. // // The transformer f must be a function "func(T) R" that converts values of @@ -263,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) // same transform to the output of itself (e.g., in the case where the // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. +// in the tail of the [Path] since the last non-[Transform] step. // For situations where the implicit filter is still insufficient, -// consider using cmpopts.AcyclicTransformer, which adds a filter -// to prevent the transformer from being recursively applied upon itself. +// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], +// which adds a filter to prevent the transformer from +// being recursively applied upon itself. // -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep (and eventually shown in the Diff output). +// The name is a user provided label that is used as the [Transform.Name] in the +// transformation [PathStep] (and eventually shown in the [Diff] output). // The name must be a valid identifier or qualified identifier in Go syntax. // If empty, an arbitrary name is used. 
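A short sketch of the Transformer option documented above, assuming only the vendored go-cmp API; the transformer name "ToLower" is arbitrary:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Both values are transformed before comparison; the label appears
	// in Diff output as ToLower(). The implicit filter described above
	// prevents the string-to-string transform from recursing on itself.
	opt := cmp.Transformer("ToLower", func(s string) string {
		return strings.ToLower(s)
	})
	fmt.Println(cmp.Equal("Hello", "HELLO", opt)) // true
}
```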
func Transformer(name string, f interface{}) Option { @@ -327,7 +332,7 @@ func (tr transformer) String() string { return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } -// Comparer returns an Option that determines whether two values are equal +// Comparer returns an [Option] that determines whether two values are equal // to each other. // // The comparer f must be a function "func(T, T) bool" and is implicitly @@ -336,9 +341,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -375,35 +380,32 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// Exporter returns an Option that specifies whether Equal is allowed to +// Exporter returns an [Option] that specifies whether [Equal] is allowed to // introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal +// implementation of some external package may cause the result of [Equal] // to unexpectedly change. However, it may be valid to use this option on types // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // -// In many cases, a custom Comparer should be used instead that defines +// In many cases, a custom [Comparer] should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. // -// For example, the reflect.Type documentation defines equality to be determined +// For example, the [reflect.Type] documentation defines equality to be determined // by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested +// comparison) and most attempts to compare *[regexp.Regexp] types are interested // in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: +// Both of these are accomplished using [Comparer] options: // // Comparer(func(x, y reflect.Type) bool { return x == y }) // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) // -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. +// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] +// option can be used to ignore all unexported fields on specified struct types. 
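To illustrate the unexported-field behavior described above without reaching for Exporter, a sketch using the cmpopts helper referenced in the doc comment; the user type is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type user struct {
	Name string
	id   int // unexported: Equal panics unless ignored or exported
}

func main() {
	x := user{Name: "a", id: 1}
	y := user{Name: "a", id: 2}
	// Ignore unexported fields of user rather than forcibly exporting them.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreUnexported(user{}))) // true
}
```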
func Exporter(f func(reflect.Type) bool) Option { - if !supportExporters { - panic("Exporter is not supported on purego builds") - } return exporter(f) } @@ -413,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO panic("not implemented") } -// AllowUnexported returns an Options that allows Equal to forcibly introspect +// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect // unexported fields of the specified struct types. // -// See Exporter for the proper use of this option. +// See [Exporter] for the proper use of this option. func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see [Reporter]). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags @@ -443,7 +445,7 @@ func (r Result) Equal() bool { } // ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if Equal reports false. +// This never reports true if [Result.Equal] reports false. func (r Result) ByIgnore() bool { return r.flags&reportByIgnore != 0 } @@ -453,7 +455,7 @@ func (r Result) ByMethod() bool { return r.flags&reportByMethod != 0 } -// ByFunc reports whether a Comparer function determined equality. +// ByFunc reports whether a [Comparer] function determined equality. func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } @@ -476,7 +478,7 @@ const ( reportByCycle ) -// Reporter is an Option that can be passed to Equal. When Equal traverses +// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses // the value trees, it calls PushStep as it descends into each node in the // tree and PopStep as it ascends out of the node. The leaves of the tree are // either compared (determined to be equal or not equal) or ignored and reported diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/path.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/path.go index c71003463..c3c145642 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/path.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/path.go @@ -14,9 +14,9 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// Path is a list of PathSteps describing the sequence of operations to get +// Path is a list of [PathStep] describing the sequence of operations to get // from some root type to the current position in the value tree. -// The first Path element is always an operation-less PathStep that exists +// The first Path element is always an operation-less [PathStep] that exists // simply to identify the initial type. // // When traversing structs with embedded structs, the embedded struct will @@ -29,8 +29,13 @@ type Path []PathStep // a value's tree structure. Users of this package never need to implement // these types as values of this type will be returned by this package. // -// Implementations of this interface are -// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
+// Implementations of this interface: +// - [StructField] +// - [SliceIndex] +// - [MapIndex] +// - [Indirect] +// - [TypeAssertion] +// - [Transform] type PathStep interface { String() string @@ -41,13 +46,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -70,8 +75,9 @@ func (pa *Path) pop() { *pa = (*pa)[:len(*pa)-1] } -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +// Last returns the last [PathStep] in the Path. +// If the path is empty, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Last() PathStep { return pa.Index(-1) } @@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { // Index returns the ith step in the Path and supports negative indexing. // A negative index starts counting from the tail of the Path such that -1 // refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +// If index is invalid, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Index(i int) PathStep { if i < 0 { i = len(pa) + i @@ -94,6 +101,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +116,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,14 +168,15 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } return fmt.Sprintf("{%s}", s) } -// StructField represents a struct field access on a field called Name. +// StructField is a [PathStep] that represents a struct field access +// on a field called [StructField.Name]. type StructField struct{ *structField } type structField struct { pathStep @@ -202,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } func (sf StructField) Name() string { return sf.name } // Index is the index of the field in the parent struct type. -// See reflect.Type.Field. +// See [reflect.Type.Field]. 
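The Path and PathStep machinery above is what path filters inspect at each node. A minimal sketch that ignores a single struct field by its step name, assuming only the vendored API; the event type and field name are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
)

type event struct {
	ID   string
	Seen time.Time
}

func main() {
	// Ignore the Seen field wherever it appears in the value tree;
	// StructField's String method renders the step as ".Seen".
	ignoreSeen := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Seen"
	}, cmp.Ignore())
	a := event{ID: "x", Seen: time.Now()}
	b := event{ID: "x", Seen: time.Now().Add(time.Hour)}
	fmt.Println(cmp.Equal(a, b, ignoreSeen)) // true
}
```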
func (sf StructField) Index() int { return sf.idx } -// SliceIndex is an index operation on a slice or array at some index Key. +// SliceIndex is a [PathStep] that represents an index operation on +// a slice or array at some index [SliceIndex.Key]. type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep @@ -245,12 +256,12 @@ func (si SliceIndex) Key() int { // all of the indexes to be shifted. If an index is -1, then that // indicates that the element does not exist in the associated slice. // -// Key is guaranteed to return -1 if and only if the indexes returned -// by SplitKeys are not the same. SplitKeys will never return -1 for +// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes +// returned by SplitKeys are not the same. SplitKeys will never return -1 for // both indexes. func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } -// MapIndex is an index operation on a map at some index Key. +// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. type MapIndex struct{ *mapIndex } type mapIndex struct { pathStep @@ -264,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", // Key is the value of the map key. func (mi MapIndex) Key() reflect.Value { return mi.key } -// Indirect represents pointer indirection on the parent type. +// Indirect is a [PathStep] that represents pointer indirection on the parent type. type Indirect struct{ *indirect } type indirect struct { pathStep @@ -274,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } func (in Indirect) String() string { return "*" } -// TypeAssertion represents a type assertion on an interface. +// TypeAssertion is a [PathStep] that represents a type assertion on an interface. type TypeAssertion struct{ *typeAssertion } type typeAssertion struct { pathStep @@ -282,9 +293,10 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } -// Transform is a transformation from the parent type to the current type. +// Transform is a [PathStep] that represents a transformation +// from the parent type to the current type. type Transform struct{ *transform } type transform struct { pathStep @@ -295,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -// Name is the name of the Transformer. +// Name is the name of the [Transformer]. func (tf Transform) Name() string { return tf.trans.name } // Func is the function pointer to the transformer function. func (tf Transform) Func() reflect.Value { return tf.trans.fnc } -// Option returns the originally constructed Transformer option. +// Option returns the originally constructed [Transformer] option. // The == operator can be used to detect the exact option used. 
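The == check on Transform.Option mentioned above is how a filter can detect that a transformer is already in the Path tail. A sketch of that pattern against the vendored API (cmpopts.AcyclicTransformer packages the same idea):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	trim := cmp.Transformer("Trim", strings.TrimSpace)
	// Suppress the transformer when it already occurs in the Path,
	// detected via the == identity check on Transform.Option.
	opt := cmp.FilterPath(func(p cmp.Path) bool {
		for _, ps := range p {
			if tr, ok := ps.(cmp.Transform); ok && tr.Option() == trim {
				return false // already applied on this path
			}
		}
		return true
	}, trim)
	fmt.Println(cmp.Equal(" a ", "a", opt)) // true
}
```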
func (tf Transform) Option() Option { return tf.trans } diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_compare.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..2050bf6b4 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf node. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. @@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 76c04fdbd..e39f42284 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -192,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, break } sf := t.Field(i) - if supportExporters && !isExported(sf.Name) { + if !isExported(sf.Name) { vv = retrieveUnexportedField(v, sf, true) } s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) @@ -205,13 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data.
- if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) skipType = true - return opts.WithTypeMode(emitType).FormatType(t, out) + return opts.FormatType(t, out) } } @@ -282,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -293,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. - skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_slices.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae1..23e444f62 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... 
// 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_text.go b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ff..388fcf571 100644 --- a/event-exporter/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/event-exporter/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. 
// // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/event-exporter/vendor/github.com/google/s2a-go/.gitignore b/event-exporter/vendor/github.com/google/s2a-go/.gitignore new file mode 100644 index 000000000..01764d1cd --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/.gitignore @@ -0,0 +1,6 @@ +# Ignore binaries without extension +//example/client/client +//example/server/server +//internal/v2/fakes2av2_server/fakes2av2_server + +.idea/ \ No newline at end of file diff --git a/event-exporter/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/event-exporter/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..dc079b4d6 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md @@ -0,0 +1,93 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. 
+ +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the +Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/event-exporter/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/event-exporter/vendor/github.com/google/s2a-go/CONTRIBUTING.md new file mode 100644 index 000000000..22b241cb7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/CONTRIBUTING.md @@ -0,0 +1,29 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement (CLA). You (or your employer) retain the copyright to your +contribution; this simply gives us permission to use and redistribute your +contributions as part of the project. Head over to + to see your current agreements on file or +to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows +[Google's Open Source Community Guidelines](https://opensource.google/conduct/). 
diff --git a/event-exporter/vendor/github.com/google/s2a-go/LICENSE.md b/event-exporter/vendor/github.com/google/s2a-go/LICENSE.md new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/LICENSE.md @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/event-exporter/vendor/github.com/google/s2a-go/README.md b/event-exporter/vendor/github.com/google/s2a-go/README.md new file mode 100644 index 000000000..fe0f5c1da --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/README.md @@ -0,0 +1,14 @@ +# Secure Session Agent Client Libraries + +The Secure Session Agent is a service that enables a workload to offload select +operations from the mTLS handshake and protects a workload's private key +material from exfiltration. Specifically, the workload asks the Secure Session +Agent for the TLS configuration to use during the handshake, to perform private +key operations, and to validate the peer certificate chain. The Secure Session +Agent's client libraries enable applications to communicate with the Secure +Session Agent during the TLS handshake, and to encrypt traffic to the peer +after the TLS handshake is complete. + +This repository contains the source code for the Secure Session Agent's Go +client libraries, which allow gRPC and HTTP Go applications to use the Secure Session +Agent. diff --git a/event-exporter/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/event-exporter/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go new file mode 100644 index 000000000..034d1b912 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fallback provides default implementations of fallback options when S2A fails. +package fallback + +import ( + "context" + "crypto/tls" + "fmt" + "net" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +const ( + alpnProtoStrH2 = "h2" + alpnProtoStrHTTP = "http/1.1" + defaultHTTPSPort = "443" +) + +// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. +// It supports GRPC use case, thus the alpn is set to 'h2'. +var FallbackTLSConfigGRPC = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2}, +} + +// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. +// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. +var FallbackTLSConfigHTTP = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, +} + +// ClientHandshake establishes a TLS connection and returns it, plus its auth info. +// Inputs: +// +// targetServer: the server attempted with S2A. 
+// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. +// If fallback is successful, the `conn` should be closed. +// err: the error encountered when performing the client-side TLS handshake with S2A. +type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) + +// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, +// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. +// Example use: +// +// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, +// FallbackOpts: &s2a.FallbackOptions{ // optional +// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), +// }, +// }) +// +// The fallback server's certificate must be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. +func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { + var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} + return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) +} + +func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, err + } + return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { + fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) + if fbErr != nil { + grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) + return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) + } + + tc, success := fbConn.(*tls.Conn) + if !success { + grpclog.Infof("the connection with fallback server is expected to be tls but isn't") + return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) + } + + tlsInfo := credentials.TLSInfo{ + State: tc.ConnectionState(), + CommonAuthInfo: credentials.CommonAuthInfo{ + SecurityLevel: credentials.PrivacyAndIntegrity, + }, + } + if grpclog.V(1) { + grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) + grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) + grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) + } + conn.Close() + return fbConn, tlsInfo, nil + }, nil +} + +// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. 
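As a quick illustration of the port-defaulting rule described in the doc comment above, here is a standalone sketch; normalizeFallbackAddr is a hypothetical name, but the behavior mirrors the processFallbackAddr helper defined later in this file.

package main

import (
	"fmt"
	"net"
)

// normalizeFallbackAddr appends the default HTTPS port when the address has
// no port suffix, mirroring processFallbackAddr in this file. The function
// name is hypothetical; only the behavior is taken from the vendored code.
func normalizeFallbackAddr(addr string) string {
	if _, _, err := net.SplitHostPort(addr); err != nil {
		// No port suffix detected; default to 443.
		return net.JoinHostPort(addr, "443")
	}
	return addr
}

func main() {
	fmt.Println(normalizeFallbackAddr("example.com"))      // example.com:443
	fmt.Println(normalizeFallbackAddr("example.com:8443")) // example.com:8443
}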
+// Example use: +// +// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// FallbackOpts: &s2a.FallbackOptions{ +// FallbackDialer: &s2a.FallbackDialer{ +// Dialer: fallbackDialer, +// ServerAddr: fallbackServerAddr, +// }, +// }, +// }) +// +// The fallback server's certificate should be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. +func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, "", err + } + return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil +} + +func processFallbackAddr(fallbackAddr string) (string, error) { + var fallbackServerAddr string + var err error + + if fallbackAddr == "" { + return "", fmt.Errorf("empty fallback address") + } + _, _, err = net.SplitHostPort(fallbackAddr) + if err != nil { + // fallbackAddr does not have port suffix + fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) + } else { + // FallbackServerAddr already has port suffix + fallbackServerAddr = fallbackAddr + } + return fallbackServerAddr, nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/event-exporter/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go new file mode 100644 index 000000000..aa3967f9d --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provides authentication and authorization information that +// results from the TLS handshake. +package authinfo + +import ( + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" + grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "google.golang.org/grpc/credentials" +) + +var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) + +const s2aAuthType = "s2a" + +// S2AAuthInfo exposes authentication and authorization information from the +// S2A session result to the gRPC stack. +type S2AAuthInfo struct { + s2aContext *contextpb.S2AContext + commonAuthInfo credentials.CommonAuthInfo +} + +// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
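Because this is an internal package, it cannot be imported from outside the s2a-go module; the following is only a hedged sketch of how module-internal code might consume the accessors defined below. The package and function names are hypothetical.

package authinfoexample // hypothetical

import (
	"context"
	"fmt"

	"github.com/google/s2a-go/internal/authinfo"
	"google.golang.org/grpc/peer"
)

// describePeer pulls the S2A auth info off a gRPC peer, if present. Both the
// function and its placement are illustrative, not part of this package.
func describePeer(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "no peer in context"
	}
	info, ok := p.AuthInfo.(*authinfo.S2AAuthInfo)
	if !ok {
		return "peer not authenticated via S2A"
	}
	return fmt.Sprintf("protocol=%s tls=%v resumed=%v",
		info.ApplicationProtocol(), info.TLSVersion(), info.IsHandshakeResumed())
}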
+func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { + return newS2AAuthInfo(result) +} + +func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { + if result == nil { + return nil, errors.New("NewS2aAuthInfo given nil session result") + } + return &S2AAuthInfo{ + s2aContext: &contextpb.S2AContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + TlsVersion: result.GetState().GetTlsVersion(), + Ciphersuite: result.GetState().GetTlsCiphersuite(), + PeerIdentity: result.GetPeerIdentity(), + LocalIdentity: result.GetLocalIdentity(), + PeerCertFingerprint: result.GetPeerCertFingerprint(), + LocalCertFingerprint: result.GetLocalCertFingerprint(), + IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), + }, + commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + }, nil +} + +// AuthType returns the authentication type. +func (s *S2AAuthInfo) AuthType() string { + return s2aAuthType +} + +// ApplicationProtocol returns the application protocol, e.g. "grpc". +func (s *S2AAuthInfo) ApplicationProtocol() string { + return s.s2aContext.GetApplicationProtocol() +} + +// TLSVersion returns the TLS version negotiated during the handshake. +func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { + return s.s2aContext.GetTlsVersion() +} + +// Ciphersuite returns the ciphersuite negotiated during the handshake. +func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { + return s.s2aContext.GetCiphersuite() +} + +// PeerIdentity returns the authenticated identity of the peer. +func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { + return s.s2aContext.GetPeerIdentity() +} + +// LocalIdentity returns the local identity of the application used during +// session setup. +func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { + return s.s2aContext.GetLocalIdentity() +} + +// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in +// the S2A handshake. +func (s *S2AAuthInfo) PeerCertFingerprint() []byte { + return s.s2aContext.GetPeerCertFingerprint() +} + +// LocalCertFingerprint returns the SHA256 hash of the local certificate used +// in the S2A handshake. +func (s *S2AAuthInfo) LocalCertFingerprint() []byte { + return s.s2aContext.GetLocalCertFingerprint() +} + +// IsHandshakeResumed returns true if a cached session was used to resume +// the handshake. +func (s *S2AAuthInfo) IsHandshakeResumed() bool { + return s.s2aContext.GetIsHandshakeResumed() +} + +// SecurityLevel returns the security level of the connection. +func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { + return s.commonAuthInfo.SecurityLevel +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go new file mode 100644 index 000000000..8297c9a97 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go @@ -0,0 +1,438 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker communicates with the S2A handshaker service. +package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + + "github.com/google/s2a-go/internal/authinfo" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/record" + "github.com/google/s2a-go/internal/tokenmanager" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + // appProtocol contains the application protocol accepted by the handshaker. + appProtocol = "grpc" + // frameLimit is the maximum size of a frame in bytes. + frameLimit = 1024 * 64 + // peerNotRespondingError is the error thrown when the peer doesn't respond. + errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") +) + +// Handshaker defines a handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a TLS handshake from the client side, + // and returns a secure connection along with additional auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a TLS handshake from the server side, + // and returns a secure connection along with additional auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the handshake + // is complete. + Close() error +} + +// ClientHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the client-side. +type ClientHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the client. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the client. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // client. + TLSCiphersuites []commonpb.Ciphersuite + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []*commonpb.Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity. + LocalIdentity *commonpb.Identity + // TargetName is the allowed server name, which may be used for server + // authorization check by the S2A if it is provided. + TargetName string + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// ServerHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the server-side. +type ServerHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the server. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the server. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // server. 
+ TLSCiphersuites []commonpb.Ciphersuite + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity. + LocalIdentities []*commonpb.Identity +} + +// s2aHandshaker performs a TLS handshake using the S2A handshaker service. +type s2aHandshaker struct { + // stream is used to communicate with the S2A handshaker service. + stream s2apb.S2AService_SetUpSessionClient + // conn is the connection to the peer. + conn net.Conn + // clientOpts should be non-nil iff the handshaker is client-side. + clientOpts *ClientHandshakerOptions + // serverOpts should be non-nil iff the handshaker is server-side. + serverOpts *ServerHandshakerOptions + // isClient determines if the handshaker is client or server side. + isClient bool + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // tokenManager manages access tokens for authenticating to S2A. + tokenManager tokenmanager.AccessTokenManager + // localIdentities is the set of local identities for whom the + // tokenManager should fetch a token when preparing a request to be + // sent to S2A. + localIdentities []*commonpb.Identity +} + +// NewClientHandshaker creates an s2aHandshaker instance that performs a +// client-side TLS handshake using the S2A handshaker service. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = []*commonpb.Identity{opts.LocalIdentity} + } + return &s2aHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + isClient: true, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// NewServerHandshaker creates an s2aHandshaker instance that performs a +// server-side TLS handshake using the S2A handshaker service. 
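To make the client-side flow concrete, here is a hedged sketch of driving NewClientHandshaker directly from module-internal code. The agent address, target name, and helper name are hypothetical, and real callers would normally go through the public s2a package rather than this internal one.

package handshakeexample // hypothetical

import (
	"context"
	"net"

	"github.com/google/s2a-go/internal/handshaker"
	commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
	grpc "google.golang.org/grpc"
)

// dialThroughS2A is a hypothetical helper: grpcConn is an open connection to
// the S2A handshaker service and rawConn is the TCP connection to the peer.
func dialThroughS2A(ctx context.Context, grpcConn *grpc.ClientConn, rawConn net.Conn) (net.Conn, error) {
	hs, err := handshaker.NewClientHandshaker(ctx, grpcConn, rawConn, "s2a-agent:61365", &handshaker.ClientHandshakerOptions{
		MinTLSVersion:   commonpb.TLSVersion_TLS1_3,
		MaxTLSVersion:   commonpb.TLSVersion_TLS1_3,
		TLSCiphersuites: []commonpb.Ciphersuite{commonpb.Ciphersuite_AES_128_GCM_SHA256},
		TargetName:      "example.com:443",
	})
	if err != nil {
		return nil, err
	}
	// Close the stream to the handshaker service once the handshake is done.
	defer hs.Close()
	secureConn, _, err := hs.ClientHandshake(ctx)
	if err != nil {
		return nil, err
	}
	return secureConn, nil
}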
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = opts.LocalIdentities + } + return &s2aHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + isClient: false, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// ClientHandshake performs a client-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if !h.isClient { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") + } + // Extract the hostname from the target name. The target name is assumed to be an authority. + hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) + if err != nil { + // If the target name had no host port or could not be parsed, use it as is. + hostname = h.clientOpts.TargetName + } + + // Prepare a client start message to send to the S2A handshaker service. + req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ClientStart{ + ClientStart: &s2apb.ClientSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.clientOpts.MinTLSVersion, + MaxTlsVersion: h.clientOpts.MaxTLSVersion, + TlsCiphersuites: h.clientOpts.TLSCiphersuites, + TargetIdentities: h.clientOpts.TargetIdentities, + LocalIdentity: h.clientOpts.LocalIdentity, + TargetName: hostname, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// ServerHandshake performs a server-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if h.isClient { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") + } + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + // Prepare a server start message to send to the S2A handshaker service. 
+ req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ServerStart{ + ServerStart: &s2apb.ServerSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.serverOpts.MinTLSVersion, + MaxTlsVersion: h.serverOpts.MaxTLSVersion, + TlsCiphersuites: h.serverOpts.TLSCiphersuites, + LocalIdentities: h.serverOpts.LocalIdentities, + InBytes: p[:n], + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// setUpSession proxies messages between the peer and the S2A handshaker +// service. +func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check if the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + // Calculate the extra unread bytes from the Session. Attempting to consume + // more than the bytes sent will throw an error. + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + if result.GetLocalIdentity() == nil { + return nil, nil, errors.New("local identity must be populated in session result") + } + + // Create a new TLS record protocol using the Session Result. + newConn, err := record.NewConn(&record.ConnParameters{ + NetConn: h.conn, + Ciphersuite: result.GetState().GetTlsCiphersuite(), + TLSVersion: result.GetState().GetTlsVersion(), + InTrafficSecret: result.GetState().GetInKey(), + OutTrafficSecret: result.GetState().GetOutKey(), + UnusedBuf: extra, + InSequence: result.GetState().GetInSequence(), + OutSequence: result.GetState().GetOutSequence(), + HSAddr: h.hsAddr, + ConnectionID: result.GetState().GetConnectionId(), + LocalIdentity: result.GetLocalIdentity(), + EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), + }) + if err != nil { + return nil, nil, err + } + return newConn, result, nil +} + +func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { + if h.clientOpts == nil { + return nil + } + return h.clientOpts.EnsureProcessSessionTickets +} + +// accessHandshakerService sends the session request to the S2A handshaker +// service and returns the session response. +func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone continues proxying messages between the peer and the S2A +// handshaker service until the handshaker service returns the SessionResult at +// the end of the handshake or an error occurs. 
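The out-of-bounds guard used in setUpSession above, and again in processUntilDone below, can be distilled into a standalone sketch; unconsumed is a hypothetical name.

package main

import (
	"errors"
	"fmt"
)

// unconsumed returns the bytes the handshaker service has not yet consumed,
// rejecting a consumed count that exceeds what was actually sent.
func unconsumed(in []byte, consumed uint32) ([]byte, error) {
	if consumed > uint32(len(in)) {
		return nil, errors.New("consumed bytes value is out-of-bounds")
	}
	return in[consumed:], nil
}

func main() {
	rest, err := unconsumed([]byte("hello"), 3)
	fmt.Println(string(rest), err) // lo <nil>
	_, err = unconsumed([]byte("hi"), 5)
	fmt.Println(err) // consumed bytes value is out-of-bounds
}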
+func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, unusedBytes, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service and nothing is + // received from the peer, then we are stuck. This covers the case when + // the peer is not responding. Note that handshaker service connection + // issues are caught in accessHandshakerService before we even get + // here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, errPeerNotResponding + } + // Append extra bytes from the previous interaction with the handshaker + // service with the current buffer read from conn. + p := append(unusedBytes, buf[:n]...) + // From here on, p and unusedBytes point to the same slice. + resp, err = h.accessHandshakerService(&s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_Next{ + Next: &s2apb.SessionNextReq{ + InBytes: p, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + }) + if err != nil { + return nil, nil, err + } + + // Cache the local identity returned by S2A, if it is populated. This + // overwrites any existing local identities. This is done because, once the + // S2A has selected a local identity, then only that local identity should + // be asserted in future requests until the end of the current handshake. + if resp.GetLocalIdentity() != nil { + h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} + } + + // Set unusedBytes based on the handshaker service response. + if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + unusedBytes = p[resp.GetBytesConsumed():] + } +} + +// Close shuts down the handshaker and the stream to the S2A handshaker service +// when the handshake is complete. It should be called when the caller obtains +// the secure connection at the end of the handshake. +func (h *s2aHandshaker) Close() error { + return h.stream.CloseSend() +} + +func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if h.tokenManager == nil { + return nil + } + // First handle the special case when no local identities have been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if len(h.localIdentities) == 0 { + token, err := h.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has provided + // one or more local identities. 
+ var authMechanisms []*s2apb.AuthenticationMechanism + for _, localIdentity := range h.localIdentities { + token, err := h.tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) + continue + } + + authMechanism := &s2apb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + } + authMechanisms = append(authMechanisms, authMechanism) + } + return authMechanisms +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go new file mode 100644 index 000000000..ed4496537 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service is a utility for calling the S2A handshaker service. +package service + +import ( + "context" + "sync" + + grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" +) + +var ( + // mu guards hsConnMap and hsDialer. + mu sync.Mutex + // hsConnMap represents a mapping from an S2A handshaker service address + // to a corresponding connection to an S2A handshaker service instance. + hsConnMap = make(map[string]*grpc.ClientConn) + // hsDialer will be reassigned in tests. + hsDialer = grpc.DialContext +) + +// Dial dials the S2A handshaker service. If a connection has already been +// established, this function returns it. Otherwise, a new connection is +// created. +func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + hsConn, ok := hsConnMap[handshakerServiceAddress] + if !ok { + // Create a new connection to the S2A handshaker service. Note that + // this connection stays open until the application is closed. + var grpcOpts []grpc.DialOption + if transportCreds != nil { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds)) + } else { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + var err error + hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...) 
+ if err != nil { + return nil, err + } + hsConnMap[handshakerServiceAddress] = hsConn + } + return hsConn, nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go new file mode 100644 index 000000000..16278a1d9 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -0,0 +1,389 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The ciphersuites supported by S2A. The name determines the confidentiality, +// and authentication ciphers as well as the hash algorithm used for PRF in +// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: +// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. +// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. +type Ciphersuite int32 + +const ( + Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 + Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 + Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "AES_128_GCM_SHA256", + 1: "AES_256_GCM_SHA384", + 2: "CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "AES_128_GCM_SHA256": 0, + "AES_256_GCM_SHA384": 1, + "CHACHA20_POLY1305_SHA256": 2, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS1_2 TLSVersion = 0 + TLSVersion_TLS1_3 TLSVersion = 1 +) + +// Enum value maps for TLSVersion. 
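The Dial helper above boils down to a lazily populated, mutex-guarded cache keyed by address. Here is a standalone sketch of just that pattern, with a string standing in for the gRPC connection; all names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

var (
	cacheMu sync.Mutex
	cache   = make(map[string]string) // address -> placeholder for *grpc.ClientConn
)

// dialCached returns the cached entry for addr, creating it on first use
// while holding the lock, just as Dial does with hsConnMap.
func dialCached(addr string) string {
	cacheMu.Lock()
	defer cacheMu.Unlock()
	c, ok := cache[addr]
	if !ok {
		c = "conn-to-" + addr // stands in for the hsDialer call
		cache[addr] = c
	}
	return c
}

func main() {
	fmt.Println(dialCached("s2a:61365"))
	fmt.Println(dialCached("s2a:61365")) // second call hits the cache
}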
+var ( + TLSVersion_name = map[int32]string{ + 0: "TLS1_2", + 1: "TLS1_3", + } + TLSVersion_value = map[string]int32{ + "TLS1_2": 0, + "TLS1_3": 1, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_MdbUsername + // *Identity_GaiaId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetMdbUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { + return x.MdbUsername + } + return "" +} + +func (x *Identity) GetGaiaId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { + return x.GaiaId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. 
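The oneof wrapper types defined below pair each identity variant with a generated accessor. A hedged usage sketch follows; it is importable only from inside the s2a-go module, since the package is internal.

package main

import (
	"fmt"

	commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
)

func main() {
	// Select the spiffe_id variant of identity_oneof.
	id := &commonpb.Identity{
		IdentityOneof: &commonpb.Identity_SpiffeId{SpiffeId: "spiffe://example.org/workload"},
	}
	fmt.Println(id.GetSpiffeId()) // spiffe://example.org/workload
	fmt.Println(id.GetHostname()) // "" because a different variant is set
}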
+ SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_MdbUsername struct { + // The MDB username of a connection endpoint. + MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +} + +type Identity_GaiaId struct { + // The Gaia ID of a connection endpoint. + GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} + +func (*Identity_GaiaId) isIdentity_IdentityOneof() {} + +var File_internal_proto_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_common_common_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, + 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 
0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, + 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, + 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, + 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, + 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_common_common_proto_rawDescOnce sync.Once + file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc +) + +func file_internal_proto_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) + }) + return file_internal_proto_common_common_proto_rawDescData +} + +var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.TLSVersion + (*Identity)(nil), // 2: s2a.proto.Identity + nil, // 3: s2a.proto.Identity.AttributesEntry +} +var file_internal_proto_common_common_proto_depIdxs = []int32{ + 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_common_common_proto_init() } +func file_internal_proto_common_common_proto_init() { + if File_internal_proto_common_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_MdbUsername)(nil), + (*Identity_GaiaId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_common_common_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_common_common_proto_enumTypes, + MessageInfos: 
file_internal_proto_common_common_proto_msgTypes, + }.Build() + File_internal_proto_common_common_proto = out.File + file_internal_proto_common_common_proto_rawDesc = nil + file_internal_proto_common_common_proto_goTypes = nil + file_internal_proto_common_common_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 000000000..f4f763ae1 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,267 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection, e.g., 'grpc'. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The TLS version number that the S2A's handshaker module used to set up the + // session. + TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. 
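One property worth noting about the getters that follow: like the other protoc-gen-go accessors in this diff, they tolerate a nil receiver, so an absent context can be queried without an explicit nil check. A small sketch, again only importable within the s2a-go module:

package main

import (
	"fmt"

	contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto"
)

func main() {
	var c *contextpb.S2AContext // deliberately nil
	fmt.Println(c.GetApplicationProtocol() == "") // true, and no panic
	fmt.Println(c.GetIsHandshakeResumed())        // false
}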
+ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. + PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // Set to true if a cached session was reused to resume the handshake. + IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. +func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +func (x *S2AContext) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *S2AContext) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, + 
0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { 
+ file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.S2AContext + (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite + (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity +} +var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion + 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite + 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity + 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } +func file_internal_proto_s2a_context_s2a_context_proto_init() { + if File_internal_proto_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_s2a_context_s2a_context_proto = out.File + file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go new file mode 100644 index 000000000..0a86ebee5 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -0,0 +1,1377 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (Optional) Application may specify an identity associated to an + // authentication mechanism. Otherwise, S2A assumes that the authentication + // mechanism is associated with the default identity. If the default identity + // cannot be determined, session setup fails. + Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to the S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type ClientSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the client, e.g., "grpc". 
+ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
+ // (Optional) The minimum TLS version number that the S2A's handshaker module
+ // will use to set up the session. If this field is not provided, S2A will use
+ // the minimum version it supports.
+ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"`
+ // (Optional) The maximum TLS version number that the S2A's handshaker module
+ // will use to set up the session. If this field is not provided, S2A will use
+ // the maximum version it supports.
+ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"`
+ // The TLS ciphersuites that the client is willing to support.
+ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"`
+ // (Optional) Describes which server identities are acceptable to the client.
+ // If target identities are provided and none of them matches the peer
+ // identity of the server, session setup fails.
+ TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"`
+ // (Optional) The application may specify a local identity. Otherwise, S2A
+ // chooses the default local identity. If the default identity cannot be
+ // determined, session setup fails.
+ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+ // The target name that is used by S2A to configure SNI in the TLS handshake.
+ // It is also used to perform a server authorization check, if available. This
+ // check is intended to verify that the peer authenticated identity is
+ // authorized to run a service with the target name.
+ // This field MUST only contain the host portion of the server address. It
+ // MUST NOT contain the scheme or the port number. For example, if the server
+ // address is dns://www.example.com:443, the value of this field should be
+ // set to www.example.com.
+ TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"`
+}
+
+func (x *ClientSessionStartReq) Reset() {
+ *x = ClientSessionStartReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientSessionStartReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientSessionStartReq) ProtoMessage() {}
+
+func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead.
+func (*ClientSessionStartReq) Descriptor() ([]byte, []int) {
+ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClientSessionStartReq) GetApplicationProtocols() []string {
+ if x != nil {
+ return x.ApplicationProtocols
+ }
+ return nil
+}
+
+func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion {
+ if x != nil {
+ return x.MinTlsVersion
+ }
+ return common_go_proto.TLSVersion(0)
+}
+
+func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion {
+ if x != nil {
+ return x.MaxTlsVersion
+ }
+ return common_go_proto.TLSVersion(0)
+}
+
+func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite {
+ if x != nil {
+ return x.TlsCiphersuites
+ }
+ return nil
+}
+
+func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity {
+ if x != nil {
+ return x.TargetIdentities
+ }
+ return nil
+}
+
+func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity {
+ if x != nil {
+ return x.LocalIdentity
+ }
+ return nil
+}
+
+func (x *ClientSessionStartReq) GetTargetName() string {
+ if x != nil {
+ return x.TargetName
+ }
+ return ""
+}
+
+type ServerSessionStartReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The application protocols supported by the server, e.g., "grpc".
+ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
+ // (Optional) The minimum TLS version number that the S2A's handshaker module
+ // will use to set up the session. If this field is not provided, S2A will use
+ // the minimum version it supports.
+ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"`
+ // (Optional) The maximum TLS version number that the S2A's handshaker module
+ // will use to set up the session. If this field is not provided, S2A will use
+ // the maximum version it supports.
+ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"`
+ // The TLS ciphersuites that the server is willing to support.
+ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"`
+ // (Optional) A list of local identities supported by the server, if
+ // specified. Otherwise, S2A chooses the default local identity. If the
+ // default identity cannot be determined, session setup fails.
+ LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"`
+ // The byte representation of the first handshake message received from the
+ // client peer. It is possible that this first message is split into multiple
+ // chunks. In this case, the first chunk is sent using this field and the
+ // following chunks are sent using the in_bytes field of SessionNextReq.
+ // Specifically, if the client peer is using S2A, this field contains the
+ // bytes in the out_frames field of the SessionResp message that the client
+ // peer received from its S2A after initiating the handshake.
+ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *ServerSessionStartReq) Reset() { + *x = ServerSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerSessionStartReq) ProtoMessage() {} + +func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. +func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +func (x *ServerSessionStartReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type SessionNextReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of session setup, i.e., handshake messages. + // Specifically: + // - All handshake messages sent from the server to the client. + // - All, except for the first, handshake messages sent from the client to + // the server. Note that the first message is communicated to S2A using the + // in_bytes field of ServerSessionStartReq. + // + // If the peer is using S2A, this field contains the bytes in the out_frames + // field of SessionResp message that the peer received from its S2A. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *SessionNextReq) Reset() { + *x = SessionNextReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionNextReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionNextReq) ProtoMessage() {} + +func (x *SessionNextReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. 
+func (*SessionNextReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *SessionNextReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type ResumptionTicketReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of a NewSessionTicket message received from the + // server. + InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // A connection identifier that was created and sent by S2A at the end of a + // handshake. + ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // The local identity that was used by S2A during session setup and included + // in |SessionResult|. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` +} + +func (x *ResumptionTicketReq) Reset() { + *x = ResumptionTicketReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumptionTicketReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumptionTicketReq) ProtoMessage() {} + +func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. +func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (x *ResumptionTicketReq) GetInBytes() [][]byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *ResumptionTicketReq) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *SessionReq_ClientStart + // *SessionReq_ServerStart + // *SessionReq_Next + // *SessionReq_ResumptionTicket + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` + // (Optional) The authentication mechanisms that the client wishes to use to + // authenticate to the S2A, ordered by preference. The S2A will always use the + // first authentication mechanism that appears in the list and is supported by + // the S2A. 
+ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetClientStart() *ClientSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *SessionReq) GetServerStart() *ServerSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *SessionReq) GetNext() *SessionNextReq { + if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { + return x.Next + } + return nil +} + +func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { + if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { + return x.ResumptionTicket + } + return nil +} + +func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthMechanisms + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_ClientStart struct { + // The client session setup request message. + ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type SessionReq_ServerStart struct { + // The server session setup request message. + ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type SessionReq_Next struct { + // The next session setup message request message. + Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +type SessionReq_ResumptionTicket struct { + // The resumption ticket that is received from the server. This message is + // only accepted by S2A if it is running as a client and if it is received + // after session setup is complete. If S2A is running as a server and it + // receives this message, the session is terminated. + ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` +} + +func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_Next) isSessionReq_ReqOneof() {} + +func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} + +type SessionState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS version number that the S2A's handshaker module used to set up the + // session. 
+ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"`
+ // The TLS ciphersuite negotiated by the S2A's handshaker module.
+ TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"`
+ // The sequence number of the next incoming TLS record.
+ InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"`
+ // The sequence number of the next outgoing TLS record.
+ OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"`
+ // The key for the inbound direction.
+ InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"`
+ // The key for the outbound direction.
+ OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"`
+ // The constant part of the record nonce for the inbound direction.
+ InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"`
+ // The constant part of the record nonce for the outbound direction.
+ OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"`
+ // A connection identifier that can be provided to S2A to perform operations
+ // related to this connection. This identifier will be stored by the record
+ // protocol, and included in the |ResumptionTicketReq| message that is later
+ // sent back to S2A. This field is set only for client-side connections.
+ ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"`
+ // Set to true if a cached session was reused to do an abbreviated handshake.
+ IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"`
+}
+
+func (x *SessionState) Reset() {
+ *x = SessionState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SessionState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SessionState) ProtoMessage() {}
+
+func (x *SessionState) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SessionState.ProtoReflect.Descriptor instead.
+func (*SessionState) Descriptor() ([]byte, []int) {
+ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion {
+ if x != nil {
+ return x.TlsVersion
+ }
+ return common_go_proto.TLSVersion(0)
+}
+
+func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite {
+ if x != nil {
+ return x.TlsCiphersuite
+ }
+ return common_go_proto.Ciphersuite(0)
+}
+
+func (x *SessionState) GetInSequence() uint64 {
+ if x != nil {
+ return x.InSequence
+ }
+ return 0
+}
+
+func (x *SessionState) GetOutSequence() uint64 {
+ if x != nil {
+ return x.OutSequence
+ }
+ return 0
+}
+
+func (x *SessionState) GetInKey() []byte {
+ if x != nil {
+ return x.InKey
+ }
+ return nil
+}
+
+func (x *SessionState) GetOutKey() []byte {
+ if x != nil {
+ return x.OutKey
+ }
+ return nil
+}
+
+func (x *SessionState) GetInFixedNonce() []byte {
+ if x != nil {
+ return x.InFixedNonce
+ }
+ return nil
+}
+
+func (x *SessionState) GetOutFixedNonce() []byte {
+ if x != nil {
+ return x.OutFixedNonce
+ }
+ return nil
+}
+
+func (x *SessionState) GetConnectionId() uint64 {
+ if x != nil {
+ return x.ConnectionId
+ }
+ return 0
+}
+
+func (x *SessionState) GetIsHandshakeResumed() bool {
+ if x != nil {
+ return x.IsHandshakeResumed
+ }
+ return false
+}
+
+type SessionResult struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The application protocol negotiated for this session.
+ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
+ // The session state at the end of session setup. This state contains all
+ // cryptographic material required to initialize the record protocol object.
+ State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
+ // The authenticated identity of the peer.
+ PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"`
+ // The local identity used during session setup. This could be:
+ // - The local identity that the client specifies in ClientSessionStartReq.
+ // - One of the local identities that the server specifies in
+ //   ServerSessionStartReq.
+ // - If neither the client nor the server specifies local identities, the S2A
+ //   picks the default one. In this case, this field will contain that
+ //   identity.
+ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+ // The SHA256 hash of the local certificate used in the handshake.
+ LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"`
+ // The SHA256 hash of the peer certificate used in the handshake.
+ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` +} + +func (x *SessionResult) Reset() { + *x = SessionResult{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResult) ProtoMessage() {} + +func (x *SessionResult) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. +func (*SessionResult) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *SessionResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *SessionResult) GetState() *SessionState { + if x != nil { + return x.State + } + return nil +} + +func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResult) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *SessionResult) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +type SessionStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *SessionStatus) Reset() { + *x = SessionStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionStatus) ProtoMessage() {} + +func (x *SessionStatus) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. +func (*SessionStatus) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *SessionStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *SessionStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The local identity used during session setup. 
// This could be:
+ // - The local identity that the client specifies in ClientSessionStartReq.
+ // - One of the local identities that the server specifies in
+ //   ServerSessionStartReq.
+ // - If neither the client nor the server specifies local identities, the S2A
+ //   picks the default one. In this case, this field will contain that
+ //   identity.
+ //
+ // If the SessionResult is populated, then this must coincide with the local
+ // identity specified in the SessionResult; otherwise, the handshake must
+ // fail.
+ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+ // The byte representation of the frames that should be sent to the peer. May
+ // be empty if nothing needs to be sent to the peer or if in_bytes in the
+ // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent
+ // to the peer even if the session setup status is not OK, as these frames may
+ // contain appropriate alerts.
+ OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"`
+ // The number of bytes in the in_bytes field that are consumed by S2A. It is
+ // possible that part of in_bytes is unrelated to the session setup process.
+ BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"`
+ // This is set if the session is successfully set up. out_frames may
+ // still be set to frames that need to be forwarded to the peer.
+ Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"`
+ // The status of session setup at the current stage.
+ Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *SessionResp) Reset() {
+ *x = SessionResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SessionResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SessionResp) ProtoMessage() {}
+
+func (x *SessionResp) ProtoReflect() protoreflect.Message {
+ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *SessionResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *SessionResp) GetResult() *SessionResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *SessionResp) GetStatus() *SessionStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, + 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, + 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 
0x75, + 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, + 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 
0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, + 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, + 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, + 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, + 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, + 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, + 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, + 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, + 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, + 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, + 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, + 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, + 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, + 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ + (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism + (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq + (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq + (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq + (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq + (*SessionReq)(nil), // 5: s2a.proto.SessionReq + (*SessionState)(nil), // 6: s2a.proto.SessionState + (*SessionResult)(nil), // 7: s2a.proto.SessionResult + (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus + (*SessionResp)(nil), // 9: s2a.proto.SessionResp + (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity + (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite +} +var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ + 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity + 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity + 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity + 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity + 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq + 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq + 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq + 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq + 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism + 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion + 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite + 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState + 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity + 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity + 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity + 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult + 8, // 
23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus + 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq + 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp + 25, // [25:26] is the sub-list for method output_type + 24, // [24:25] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_s2a_proto_init() } +func file_internal_proto_s2a_s2a_proto_init() { + if File_internal_proto_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionNextReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumptionTicketReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + 
file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*SessionReq_ClientStart)(nil), + (*SessionReq_ServerStart)(nil), + (*SessionReq_Next)(nil), + (*SessionReq_ResumptionTicket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_s2a_s2a_proto = out.File + file_internal_proto_s2a_s2a_proto_rawDesc = nil + file_internal_proto_s2a_s2a_proto_goTypes = nil + file_internal_proto_s2a_s2a_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 000000000..0fa582fc8 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,173 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. 
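+ //
+ // A minimal client-side sketch (editorial comment, not generated code;
+ // assumes an established *grpc.ClientConn named conn):
+ //
+ //	client := NewS2AServiceClient(conn)
+ //	stream, err := client.SetUpSession(ctx)
+ //	if err != nil { /* handle error */ }
+ //	// Send exactly one client_start (or server_start), then next
+ //	// messages; Recv returns one SessionResp per SessionReq sent.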
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. 
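+//
+// The embedding route is sketched below (editorial comment, not generated
+// code; myServer is a hypothetical implementation):
+//
+//	type myServer struct{ UnimplementedS2AServiceServer }
+//
+//	func (s *myServer) SetUpSession(stream S2AService_SetUpSessionServer) error {
+//		req, err := stream.Recv() // first SessionReq from the client
+//		if err != nil {
+//			return err
+//		}
+//		_ = req // inspect client_start/server_start here
+//		return stream.Send(&SessionResp{}) // one SessionResp per SessionReq
+//	}
+//
+//	// registration: RegisterS2AServiceServer(grpcServer, &myServer{})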
+type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/s2a/s2a.proto", +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go new file mode 100644 index 000000000..c84bed977 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -0,0 +1,367 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using +// S2A. 
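+//
+// The constant names mirror the corresponding IANA TLS ciphersuite names.
+// Illustrative use of the generated enum helpers (editorial, not generated):
+//
+//	cs := Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+//	_ = cs.String() // "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
+//	_ = cs.Number() // protoreflect.EnumNumber(4)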
+type Ciphersuite int32 + +const ( + Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "CIPHERSUITE_UNSPECIFIED", + 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "CIPHERSUITE_UNSPECIFIED": 0, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, + "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, + "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 + TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 + TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 + TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 + TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 +) + +// Enum value maps for TLSVersion. 
+var ( + TLSVersion_name = map[int32]string{ + 0: "TLS_VERSION_UNSPECIFIED", + 1: "TLS_VERSION_1_0", + 2: "TLS_VERSION_1_1", + 3: "TLS_VERSION_1_2", + 4: "TLS_VERSION_1_3", + } + TLSVersion_value = map[string]int32{ + "TLS_VERSION_UNSPECIFIED": 0, + "TLS_VERSION_1_0": 1, + "TLS_VERSION_1_1": 2, + "TLS_VERSION_1_2": 3, + "TLS_VERSION_1_3": 4, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} +} + +// The side in the TLS connection. +type ConnectionSide int32 + +const ( + ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 + ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 + ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 +) + +// Enum value maps for ConnectionSide. +var ( + ConnectionSide_name = map[int32]string{ + 0: "CONNECTION_SIDE_UNSPECIFIED", + 1: "CONNECTION_SIDE_CLIENT", + 2: "CONNECTION_SIDE_SERVER", + } + ConnectionSide_value = map[string]int32{ + "CONNECTION_SIDE_UNSPECIFIED": 0, + "CONNECTION_SIDE_CLIENT": 1, + "CONNECTION_SIDE_SERVER": 2, + } +) + +func (x ConnectionSide) Enum() *ConnectionSide { + p := new(ConnectionSide) + *p = x + return p +} + +func (x ConnectionSide) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() +} + +func (ConnectionSide) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[2] +} + +func (x ConnectionSide) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionSide.Descriptor instead. +func (ConnectionSide) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} +} + +// The ALPN protocols that the application can negotiate during a TLS handshake. +type AlpnProtocol int32 + +const ( + AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 + AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 + AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 + AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 +) + +// Enum value maps for AlpnProtocol. 
+var ( + AlpnProtocol_name = map[int32]string{ + 0: "ALPN_PROTOCOL_UNSPECIFIED", + 1: "ALPN_PROTOCOL_GRPC", + 2: "ALPN_PROTOCOL_HTTP2", + 3: "ALPN_PROTOCOL_HTTP1_1", + } + AlpnProtocol_value = map[string]int32{ + "ALPN_PROTOCOL_UNSPECIFIED": 0, + "ALPN_PROTOCOL_GRPC": 1, + "ALPN_PROTOCOL_HTTP2": 2, + "ALPN_PROTOCOL_HTTP1_1": 3, + } +) + +func (x AlpnProtocol) Enum() *AlpnProtocol { + p := new(AlpnProtocol) + *p = x + return p +} + +func (x AlpnProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() +} + +func (AlpnProtocol) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[3] +} + +func (x AlpnProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlpnProtocol.Descriptor instead. +func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} +} + +var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, + 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, + 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, + 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, + 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 
0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, + 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, + 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, + 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, + 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, + 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once + file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc +) + +func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) + }) + return file_internal_proto_v2_common_common_proto_rawDescData +} + +var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var 
file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion + (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide + (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol +} +var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_common_common_proto_init() } +func file_internal_proto_v2_common_common_proto_init() { + if File_internal_proto_v2_common_common_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + }.Build() + File_internal_proto_v2_common_common_proto = out.File + file_internal_proto_v2_common_common_proto_rawDesc = nil + file_internal_proto_v2_common_common_proto_goTypes = nil + file_internal_proto_v2_common_common_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 000000000..b7fd871c7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,248 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The SPIFFE ID from the peer leaf certificate, if present. 
+ // + // This field is only populated if the leaf certificate is a valid SPIFFE + // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid + // SPIFFE ID. + LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` + // The URIs that are present in the SubjectAltName extension of the peer leaf + // certificate. + // + // Note that the extracted URIs are not validated and may not be properly + // formatted. + LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` + // The DNSNames that are present in the SubjectAltName extension of the peer + // leaf certificate. + LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` + // The (ordered) list of fingerprints in the certificate chain used to verify + // the given leaf certificate. The order MUST be from leaf certificate + // fingerprint to root certificate fingerprint. + // + // A fingerprint is the base-64 encoding of the SHA256 hash of the + // DER-encoding of a certificate. The list MAY be populated even if the peer + // certificate chain was NOT validated successfully. + PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` + // The local identity used during session setup. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the DER-encoding of the local leaf certificate used in + // the handshake. + LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
+func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetLeafCertSpiffeId() string { + if x != nil { + return x.LeafCertSpiffeId + } + return "" +} + +func (x *S2AContext) GetLeafCertUris() []string { + if x != nil { + return x.LeafCertUris + } + return nil +} + +func (x *S2AContext) GetLeafCertDnsnames() []string { + if x != nil { + return x.LeafCertDnsnames + } + return nil +} + +func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { + if x != nil { + return x.PeerCertificateChainFingerprints + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { + if x != nil { + return x.LocalLeafCertFingerprint + } + return nil +} + +var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, + 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, + 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, + 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext + (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity +} +var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } +func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { + if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_context_s2a_context_proto = out.File + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 
b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go new file mode 100644 index 000000000..e843450c7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -0,0 +1,2494 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignatureAlgorithm int32 + +const ( + SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 + // RSA Public-Key Cryptography Standards #1. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 + // ECDSA. + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 + // RSA Probabilistic Signature Scheme. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 + // ED25519. + SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 +) + +// Enum value maps for SignatureAlgorithm. 
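+//
+// The S2A_SSL_SIGN_* names parallel the TLS SignatureScheme identifiers of
+// RFC 8446, section 4.2.3 (rsa_pkcs1_sha256, ecdsa_secp256r1_sha256, and so
+// on); the numeric values are S2A's own enum numbers, not TLS codepoints.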
+var ( + SignatureAlgorithm_name = map[int32]string{ + 0: "S2A_SSL_SIGN_UNSPECIFIED", + 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", + 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", + 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", + 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", + 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", + 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", + 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", + 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", + 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", + 10: "S2A_SSL_SIGN_ED25519", + } + SignatureAlgorithm_value = map[string]int32{ + "S2A_SSL_SIGN_UNSPECIFIED": 0, + "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, + "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, + "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, + "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, + "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, + "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, + "S2A_SSL_SIGN_ED25519": 10, + } +) + +func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { + p := new(SignatureAlgorithm) + *p = x + return p +} + +func (x SignatureAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() +} + +func (SignatureAlgorithm) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] +} + +func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureAlgorithm.Descriptor instead. +func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 + +const ( + GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 + GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 +) + +// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
+var ( + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "DONT_REQUEST_CLIENT_CERTIFICATE", + 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", + 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", + } + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ + "UNSPECIFIED": 0, + "DONT_REQUEST_CLIENT_CERTIFICATE": 1, + "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, + "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, + } +) + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) + *p = x + return p +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} +} + +type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 + +const ( + OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 + // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of + // the TLS handshake must be signed to prove possession of the private key. + // + // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. + OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 + // When performing a TLS 1.2 handshake using an RSA algorithm, the key + // exchange algorithm involves the client generating a premaster secret, + // encrypting it using the server's public key, and sending this encrypted + // blob to the server in a ClientKeyExchange message. + // + // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. + OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 +) + +// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
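+//
+// An illustrative signing request (editorial comment, not generated code;
+// digest is a hypothetical 32-byte SHA256 handshake-transcript hash, and the
+// message type is defined later in this file):
+//
+//	req := &OffloadPrivateKeyOperationReq{
+//		Operation:          OffloadPrivateKeyOperationReq_SIGN,
+//		SignatureAlgorithm: SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256,
+//		InBytes: &OffloadPrivateKeyOperationReq_Sha256Digest{
+//			Sha256Digest: digest,
+//		},
+//	}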
+var ( + OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SIGN", + 2: "DECRYPT", + } + OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "SIGN": 1, + "DECRYPT": 2, + } +) + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { + p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) + *p = x + return p +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} +} + +type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 + +const ( + OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 + OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 + OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 +) + +// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. +var ( + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "ENCRYPT", + 2: "DECRYPT", + } + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "ENCRYPT": 1, + "DECRYPT": 2, + } +) + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) + *p = x + return p +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} +} + +type ValidatePeerCertificateChainReq_VerificationMode int32 + +const ( + // The default verification mode supported by S2A. 
+ ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 + // The SPIFFE verification mode selects the set of trusted certificates to + // use for path building based on the SPIFFE trust domain in the peer's leaf + // certificate. + ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 + // The connect-to-Google verification mode uses the trust bundle for + // connecting to Google, e.g. *.mtls.googleapis.com endpoints. + ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 +) + +// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. +var ( + ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SPIFFE", + 2: "CONNECT_TO_GOOGLE", + } + ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + } +) + +func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { + p := new(ValidatePeerCertificateChainReq_VerificationMode) + *p = x + return p +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. +func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +type ValidatePeerCertificateChainResp_ValidationResult int32 + +const ( + ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 + ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 + ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 +) + +// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
+var ( + ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUCCESS", + 2: "FAILURE", + } + ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUCCESS": 1, + "FAILURE": 2, + } +) + +func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { + p := new(ValidatePeerCertificateChainResp_ValidationResult) + *p = x + return p +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. +func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} +} + +type AlpnPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the application MUST perform ALPN negotiation. + EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` + // The ordered list of ALPN protocols that specify how the application SHOULD + // negotiate ALPN during the TLS handshake. + // + // The application MAY ignore any ALPN protocols in this list that are not + // supported by the application. + AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` +} + +func (x *AlpnPolicy) Reset() { + *x = AlpnPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlpnPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlpnPolicy) ProtoMessage() {} + +func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. +func (*AlpnPolicy) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { + if x != nil { + return x.EnableAlpnNegotiation + } + return false +} + +func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Applications may specify an identity associated to an authentication + // mechanism. 
Otherwise, S2A assumes that the authentication mechanism is + // associated with the default identity. If the default identity cannot be + // determined, the request is rejected. + Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *Status) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type GetTlsConfigurationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The role of the application in the TLS connection. + ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` + // The server name indication (SNI) extension, which MAY be populated when a + // server is offloading to S2A. The SNI is used to determine the server + // identity if the local identity in the request is empty. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` +} + +func (x *GetTlsConfigurationReq) Reset() { + *x = GetTlsConfigurationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationReq) ProtoMessage() {} + +func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { + if x != nil { + return x.ConnectionSide + } + return common_go_proto.ConnectionSide(0) +} + +func (x *GetTlsConfigurationReq) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +type GetTlsConfigurationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TlsConfiguration: + // + // *GetTlsConfigurationResp_ClientTlsConfiguration_ + // *GetTlsConfigurationResp_ServerTlsConfiguration_ + TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` +} + +func (x *GetTlsConfigurationResp) Reset() { + *x = GetTlsConfigurationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp) ProtoMessage() {} + +func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { + if m != nil { + return m.TlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { + return x.ClientTlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { + return x.ServerTlsConfiguration + } + return nil +} + +type isGetTlsConfigurationResp_TlsConfiguration interface { + isGetTlsConfigurationResp_TlsConfiguration() +} + +type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { + ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { + ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +type OffloadPrivateKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the private key is used for. + Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` + // The signature algorithm to be used for signing operations. + SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` + // The input bytes to be signed or decrypted. + // + // Types that are assignable to InBytes: + // + // *OffloadPrivateKeyOperationReq_RawBytes + // *OffloadPrivateKeyOperationReq_Sha256Digest + // *OffloadPrivateKeyOperationReq_Sha384Digest + // *OffloadPrivateKeyOperationReq_Sha512Digest + InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` +} + +func (x *OffloadPrivateKeyOperationReq) Reset() { + *x = OffloadPrivateKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
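// Editor's note: an editorial sketch, not upstream code. It builds a signing
// request carrying a precomputed SHA-256 digest in the in_bytes oneof; per the
// wrapper comment below, a sha256_digest MUST be exactly 32 bytes. The SIGN
// constant follows the generated-enum naming visible above for UNSPECIFIED and
// is an assumption; the function name is hypothetical.
func exampleSignSha256DigestReq(alg SignatureAlgorithm, digest [32]byte) *OffloadPrivateKeyOperationReq {
	return &OffloadPrivateKeyOperationReq{
		Operation:          OffloadPrivateKeyOperationReq_SIGN,
		SignatureAlgorithm: alg,
		// Taking a fixed-size [32]byte enforces the 32-byte requirement at
		// the call site; the oneof field itself is a []byte.
		InBytes: &OffloadPrivateKeyOperationReq_Sha256Digest{Sha256Digest: digest[:]},
	}
}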
+func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { + if x != nil { + return x.Operation + } + return OffloadPrivateKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { + if x != nil { + return x.SignatureAlgorithm + } + return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED +} + +func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { + if m != nil { + return m.InBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { + return x.RawBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { + return x.Sha256Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { + return x.Sha384Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { + return x.Sha512Digest + } + return nil +} + +type isOffloadPrivateKeyOperationReq_InBytes interface { + isOffloadPrivateKeyOperationReq_InBytes() +} + +type OffloadPrivateKeyOperationReq_RawBytes struct { + // Raw bytes to be hashed and signed, or decrypted. + RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha256Digest struct { + // A SHA256 hash to be signed. Must be 32 bytes. + Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha384Digest struct { + // A SHA384 hash to be signed. Must be 48 bytes. + Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha512Digest struct { + // A SHA512 hash to be signed. Must be 64 bytes. + Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` +} + +func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +type OffloadPrivateKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The signed or decrypted output bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadPrivateKeyOperationResp) Reset() { + *x = OffloadPrivateKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type OffloadResumptionKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the resumption key is used for. + Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` + // The bytes to be encrypted or decrypted. + InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationReq) Reset() { + *x = OffloadResumptionKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + if x != nil { + return x.Operation + } + return OffloadResumptionKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type OffloadResumptionKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encrypted or decrypted bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationResp) Reset() { + *x = OffloadResumptionKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type ValidatePeerCertificateChainReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification mode that S2A MUST use to validate the peer certificate + // chain. + Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` + // Types that are assignable to PeerOneof: + // + // *ValidatePeerCertificateChainReq_ClientPeer_ + // *ValidatePeerCertificateChainReq_ServerPeer_ + PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` +} + +func (x *ValidatePeerCertificateChainReq) Reset() { + *x = ValidatePeerCertificateChainReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
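// Editor's note: an editorial sketch, not upstream code, with a hypothetical
// function name. It wraps a server peer in the peer_oneof declared above; per
// the field comments, the chain MUST be a list of DER-encoded certificates
// ordered from leaf to root, excluding the root.
func exampleValidateServerChainReq(mode ValidatePeerCertificateChainReq_VerificationMode, derChain [][]byte, hostname string) *ValidatePeerCertificateChainReq {
	return &ValidatePeerCertificateChainReq{
		Mode: mode,
		PeerOneof: &ValidatePeerCertificateChainReq_ServerPeer_{
			ServerPeer: &ValidatePeerCertificateChainReq_ServerPeer{
				CertificateChain: derChain,
				ServerHostname:   hostname,
			},
		},
	}
}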
+func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { + if x != nil { + return x.Mode + } + return ValidatePeerCertificateChainReq_UNSPECIFIED +} + +func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { + if m != nil { + return m.PeerOneof + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { + return x.ClientPeer + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { + return x.ServerPeer + } + return nil +} + +type isValidatePeerCertificateChainReq_PeerOneof interface { + isValidatePeerCertificateChainReq_PeerOneof() +} + +type ValidatePeerCertificateChainReq_ClientPeer_ struct { + ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` +} + +type ValidatePeerCertificateChainReq_ServerPeer_ struct { + ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` +} + +func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +type ValidatePeerCertificateChainResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The result of validating the peer certificate chain. + ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` + // The validation details. This field is only populated when the validation + // result is NOT SUCCESS. + ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` + // The S2A context contains information from the peer certificate chain. + // + // The S2A context MAY be populated even if validation of the peer certificate + // chain fails. 
+ Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ValidatePeerCertificateChainResp) Reset() { + *x = ValidatePeerCertificateChainResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainResp) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { + if x != nil { + return x.ValidationResult + } + return ValidatePeerCertificateChainResp_UNSPECIFIED +} + +func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { + if x != nil { + return x.ValidationDetails + } + return "" +} + +func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { + if x != nil { + return x.Context + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identity corresponding to the TLS configurations that MUST be used for + // the TLS handshake. + // + // If a managed identity already exists, the local identity and authentication + // mechanisms are ignored. If a managed identity doesn't exist and the local + // identity is not populated, S2A will try to deduce the managed identity to + // use from the SNI extension. If that also fails, S2A uses the default + // identity (if one exists). + LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The authentication mechanisms that the application wishes to use to + // authenticate to S2A, ordered by preference. S2A will always use the first + // authentication mechanism that matches the managed identity. 
+ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` + // Types that are assignable to ReqOneof: + // + // *SessionReq_GetTlsConfigurationReq + // *SessionReq_OffloadPrivateKeyOperationReq + // *SessionReq_OffloadResumptionKeyOperationReq + // *SessionReq_ValidatePeerCertificateChainReq + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} +} + +func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthenticationMechanisms + } + return nil +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { + if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { + return x.GetTlsConfigurationReq + } + return nil +} + +func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { + return x.OffloadPrivateKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { + return x.OffloadResumptionKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { + if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { + return x.ValidatePeerCertificateChainReq + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_GetTlsConfigurationReq struct { + // Requests the certificate chain and TLS configuration corresponding to the + // local identity, which the application MUST use to negotiate the TLS + // handshake. + GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` +} + +type SessionReq_OffloadPrivateKeyOperationReq struct { + // Signs or decrypts the input bytes using a private key corresponding to + // the local identity in the request. + // + // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the + // S2Av2 by a server during a TLS 1.2 handshake. 
+ OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` +} + +type SessionReq_OffloadResumptionKeyOperationReq struct { + // Encrypts or decrypts the input bytes using a resumption key corresponding + // to the local identity in the request. + OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` +} + +type SessionReq_ValidatePeerCertificateChainReq struct { + // Verifies the peer's certificate chain using + // (a) trust bundles corresponding to the local identity in the request, and + // (b) the verification mode in the request. + ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` +} + +func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Status of the session response. + // + // The status field is populated so that if an error occurs when making an + // individual request, then communication with the S2A may continue. If an + // error is returned directly (e.g. at the gRPC layer), then it may result + // in the bidirectional stream being closed. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Types that are assignable to RespOneof: + // + // *SessionResp_GetTlsConfigurationResp + // *SessionResp_OffloadPrivateKeyOperationResp + // *SessionResp_OffloadResumptionKeyOperationResp + // *SessionResp_ValidatePeerCertificateChainResp + RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
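// Editor's note: an editorial sketch, not upstream code, with a hypothetical
// function name. Per the status field comment above, per-request failures are
// reported in-band so the bidirectional stream can stay open. Treating code 0
// as success is an assumption borrowed from gRPC status conventions.
func exampleSessionRespErr(resp *SessionResp) (code uint32, details string, ok bool) {
	st := resp.GetStatus()
	if st == nil || st.GetCode() == 0 {
		return 0, "", true
	}
	return st.GetCode(), st.GetDetails(), false
}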
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} +} + +func (x *SessionResp) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { + if m != nil { + return m.RespOneof + } + return nil +} + +func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { + if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { + return x.GetTlsConfigurationResp + } + return nil +} + +func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { + return x.OffloadPrivateKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { + return x.OffloadResumptionKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { + if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { + return x.ValidatePeerCertificateChainResp + } + return nil +} + +type isSessionResp_RespOneof interface { + isSessionResp_RespOneof() +} + +type SessionResp_GetTlsConfigurationResp struct { + // Contains the certificate chain and TLS configurations corresponding to + // the local identity. + GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` +} + +type SessionResp_OffloadPrivateKeyOperationResp struct { + // Contains the signed or encrypted output bytes using the private key + // corresponding to the local identity. + OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` +} + +type SessionResp_OffloadResumptionKeyOperationResp struct { + // Contains the encrypted or decrypted output bytes using the resumption key + // corresponding to the local identity. + OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` +} + +type SessionResp_ValidatePeerCertificateChainResp struct { + // Contains the validation result, peer identity and fingerprints of peer + // certificates. + ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` +} + +func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} + +// Next ID: 8 +type GetTlsConfigurationResp_ClientTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the client MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. 
+ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // minimum version of the client's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // maximum version of the client's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // The policy that dictates how the client negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ClientTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
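// Editor's note: an editorial sketch, not upstream code, with a hypothetical
// function name. The PEM chain above is ordered from leaf to root, excluding
// the root, so index 0 is the leaf certificate.
func exampleClientLeafCertPEM(c *GetTlsConfigurationResp_ClientTlsConfiguration) (string, bool) {
	chain := c.GetCertificateChain()
	if len(chain) == 0 {
		return "", false
	}
	return chain[0], true
}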
+func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +// Next ID: 12 +type GetTlsConfigurationResp_ServerTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the server MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. + CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // minimum version of the server's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // maximum version of the server's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // Whether to enable TLS resumption. + TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` + // Whether the server MUST request a client certificate (i.e. to negotiate + // TLS vs. mTLS). + RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` + // The maximum number of extra bytes that + // |OffloadResumptionKeyOperation| can add to the number of unencrypted + // bytes to form the encrypted bytes.
+ MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` + // The policy that dictates how the server negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ServerTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { + if x != nil { + return x.TlsResumptionEnabled + } + return false +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + if x != nil { + return x.RequestClientCertificate + } + return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { + if x != nil { + return x.MaxOverheadOfTicketAead + } + return 0 +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +type ValidatePeerCertificateChainReq_ClientPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. 
+ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ClientPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +type ValidatePeerCertificateChainReq_ServerPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. + CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The expected hostname of the server. + ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` + // The UnrestrictedClientPolicy specified by the user. + SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ServerPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
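// Editor's note: an editorial sketch, not upstream code, with a hypothetical
// function name. It assumes the generated constant
// ValidatePeerCertificateChainResp_SUCCESS for the SUCCESS value visible in the
// raw descriptor below. Per the field comments, the details string is only
// populated when the result is not SUCCESS.
func exampleChainValidationOK(resp *ValidatePeerCertificateChainResp) (bool, string) {
	ok := resp.GetValidationResult() == ValidatePeerCertificateChainResp_SUCCESS
	return ok, resp.GetValidationDetails()
}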
+func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { + if x != nil { + return x.ServerHostname + } + return "" +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { + if x != nil { + return x.SerializedUnrestrictedClientPolicy + } + return nil +} + +var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, + 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, + 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, + 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, + 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, + 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, + 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, + 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, + 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, + 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 
0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, + 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, + 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, + 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, + 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, + 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, + 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, + 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 
0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 
0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, + 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, + 0x72, 0x65, 0x73, 0x70, 0x5f, 
0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, + 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, + 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, + 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, + 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, + 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, + 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, + 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + 
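+// Editorial note (not part of the generated file): the var block and helper
+// below implement protoc-gen-go's usual lazy-compression pattern, where the
+// raw descriptor above is GZIP-compressed once, on first use. For
+// illustration only, a consumer could recover the registered descriptor at
+// runtime via the global protobuf registry; the blank import path here is
+// hypothetical, since this vendored package is internal to s2a-go:
+//
+//	import (
+//		"fmt"
+//
+//		"google.golang.org/protobuf/reflect/protoregistry"
+//
+//		_ "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
+//	)
+//
+//	func printService() {
+//		// The path matches the "source:" comment in the generated file.
+//		fd, err := protoregistry.GlobalFiles.FindFileByPath("internal/proto/v2/s2a/s2a.proto")
+//		if err != nil {
+//			panic(err)
+//		}
+//		fmt.Println(fd.Services().Get(0).FullName()) // s2a.proto.v2.S2AService
+//	}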
+var ( + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ + (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm + (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy + (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism + (*Status)(nil), // 8: s2a.proto.v2.Status + (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq + (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp + (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq + (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp + (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq + (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp + (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq + (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp + (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq + (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp + (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol + (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide + (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext + (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion + (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite +} +var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ + 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol + 
24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide + 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm + 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism + 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq + 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq + 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq + 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq + 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status + 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp + 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp + 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp + 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp + 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 29: 
s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq + 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_s2a_proto_init() } +func file_internal_proto_v2_s2a_s2a_proto_init() { + if File_internal_proto_v2_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlpnPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadResumptionKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*OffloadResumptionKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), + (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OffloadPrivateKeyOperationReq_RawBytes)(nil), + (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), + (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SessionReq_GetTlsConfigurationReq)(nil), + (*SessionReq_OffloadPrivateKeyOperationReq)(nil), + 
(*SessionReq_OffloadResumptionKeyOperationReq)(nil), + (*SessionReq_ValidatePeerCertificateChainReq)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*SessionResp_GetTlsConfigurationResp)(nil), + (*SessionResp_OffloadPrivateKeyOperationResp)(nil), + (*SessionResp_OffloadResumptionKeyOperationResp)(nil), + (*SessionResp_ValidatePeerCertificateChainResp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, + NumEnums: 6, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, + EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, + MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_s2a_proto = out.File + file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil + file_internal_proto_v2_s2a_s2a_proto_goTypes = nil + file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 000000000..2566df6c3 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,159 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. 
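+	//
+	// A minimal client-side sketch (editorial illustration, assuming an
+	// existing *grpc.ClientConn named conn and a context.Context named ctx):
+	//
+	//	client := NewS2AServiceClient(conn)
+	//	stream, err := client.SetUpSession(ctx)
+	//	if err != nil { /* handle error */ }
+	//	if err := stream.Send(&SessionReq{}); err != nil { /* handle error */ }
+	//	resp, err := stream.Recv() // a SessionResp carrying one of the *Resp messages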
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. +type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.v2.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/v2/s2a/s2a.proto", +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go new file mode 100644 index 000000000..486f4ec4f --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package aeadcrypter provides the interface for AEAD cipher implementations +// used by S2A's record protocol. +package aeadcrypter + +// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record +// protocol. +type S2AAEADCrypter interface { + // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. + // dst and plaintext may fully overlap or not at all. + Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) + // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may + // fully overlap or not at all. + Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) + // TagSize returns the tag size in bytes. + TagSize() int +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go new file mode 100644 index 000000000..85c4e595d --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" +) + +// Supported key sizes in bytes. +const ( + AES128GCMKeySize = 16 + AES256GCMKeySize = 32 +) + +// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. 
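+//
+// Editorial illustration (not part of the vendored file): round-tripping a
+// payload through the S2AAEADCrypter interface defined above; the key and
+// nonce values here are placeholder assumptions.
+//
+//	key := make([]byte, AES128GCMKeySize) // all-zero key, illustration only
+//	crypter, err := NewAESGCM(key)
+//	if err != nil { /* handle bad key size */ }
+//	nonce := make([]byte, NonceSize) // NonceSize is defined in common.go
+//	ct, _ := crypter.Encrypt(nil, []byte("record payload"), nonce, nil)
+//	pt, _ := crypter.Decrypt(nil, ct, nonce, nil) // pt == "record payload"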
+type aesgcm struct {
+	aead cipher.AEAD
+}
+
+// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be
+// either 128 bits or 256 bits.
+func NewAESGCM(key []byte) (S2AAEADCrypter, error) {
+	if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize {
+		return nil, fmt.Errorf("AES-GCM key size must be %d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key))
+	}
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	a, err := cipher.NewGCM(c)
+	if err != nil {
+		return nil, err
+	}
+	return &aesgcm{aead: a}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext may
+// fully overlap or not at all.
+func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) {
+	return encrypt(s.aead, dst, plaintext, nonce, aad)
+}
+
+func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) {
+	return decrypt(s.aead, dst, ciphertext, nonce, aad)
+}
+
+func (s *aesgcm) TagSize() int {
+	return TagSize
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
new file mode 100644
index 000000000..214df4ca4
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package aeadcrypter
+
+import (
+	"crypto/cipher"
+	"fmt"
+
+	"golang.org/x/crypto/chacha20poly1305"
+)
+
+// Supported key size in bytes.
+const (
+	Chacha20Poly1305KeySize = 32
+)
+
+// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD
+// crypter.
+type chachapoly struct {
+	aead cipher.AEAD
+}
+
+// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must
+// be Chacha20Poly1305KeySize bytes in length.
+func NewChachaPoly(key []byte) (S2AAEADCrypter, error) {
+	if len(key) != Chacha20Poly1305KeySize {
+		return nil, fmt.Errorf("chacha20poly1305 key size must be %d bytes, given: %d", Chacha20Poly1305KeySize, len(key))
+	}
+	c, err := chacha20poly1305.New(key)
+	if err != nil {
+		return nil, err
+	}
+	return &chachapoly{aead: c}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext may
+// fully overlap or not at all.
+func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *chachapoly) TagSize() int { + return TagSize +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go new file mode 100644 index 000000000..b3c36ad95 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" +) + +const ( + // TagSize is the tag size in bytes for AES-128-GCM-SHA256, + // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + TagSize = 16 + // NonceSize is the size of the nonce in number of bytes for + // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + NonceSize = 12 + // SHA256DigestSize is the digest size of sha256 in bytes. + SHA256DigestSize = 32 + // SHA384DigestSize is the digest size of sha384 in bytes. + SHA384DigestSize = 48 +) + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// encrypt is the encryption function for an AEAD crypter. aead determines +// the type of AEAD crypter. dst can contain bytes at the beginning of the +// ciphertext that will not be encrypted but will be authenticated. If dst has +// enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If we need to allocate an output buffer, we want to include space for + // the tag to avoid forcing the caller to reallocate as well. + dlen := len(dst) + dst, out := sliceForAppend(dst, len(plaintext)+TagSize) + data := out[:len(plaintext)] + copy(data, plaintext) // data may fully overlap plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. 
However, sliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = aead.Seal(dst[:dlen], nonce, data, aad) + return dst, nil +} + +// decrypt is the decryption function for an AEAD crypter, where aead determines +// the type of AEAD crypter, and dst the destination bytes for the decrypted +// ciphertext. The dst buffer may fully overlap with plaintext or not at all. +func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := aead.Open(dst, nonce, ciphertext, aad) + if err != nil { + return nil, fmt.Errorf("message auth failed: %v", err) + } + return plaintext, nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go new file mode 100644 index 000000000..ddeaa6d77 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" +) + +// ciphersuite is the interface for retrieving ciphersuite-specific information +// and utilities. +type ciphersuite interface { + // keySize returns the key size in bytes. This refers to the key used by + // the AEAD crypter. This is derived by calling HKDF expand on the traffic + // secret. + keySize() int + // nonceSize returns the nonce size in bytes. + nonceSize() int + // trafficSecretSize returns the traffic secret size in bytes. This refers + // to the secret used to derive the traffic key and nonce, as specified in + // https://tools.ietf.org/html/rfc8446#section-7. + trafficSecretSize() int + // hashFunction returns the hash function for the ciphersuite. + hashFunction() func() hash.Hash + // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite + // using that key. + aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) +} + +func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { + switch ciphersuite { + case s2apb.Ciphersuite_AES_128_GCM_SHA256: + return &aesgcm128sha256{}, nil + case s2apb.Ciphersuite_AES_256_GCM_SHA384: + return &aesgcm256sha384{}, nil + case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: + return &chachapolysha256{}, nil + default: + return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) + } +} + +// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite +// interface. 
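+//
+// Editorial illustration (not part of the vendored file): selecting and using
+// one of the implementations below via newCiphersuite above.
+//
+//	cs, err := newCiphersuite(s2apb.Ciphersuite_AES_128_GCM_SHA256)
+//	if err != nil { /* unrecognized ciphersuite */ }
+//	crypter, _ := cs.aeadCrypter(make([]byte, cs.keySize())) // zero key, for illustration
+//	_ = crypter // an aeadcrypter.S2AAEADCrypter backed by AES-128-GCM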
+type aesgcm128sha256 struct{} + +func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } +func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } +func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite +// interface. +type aesgcm256sha384 struct{} + +func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } +func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } +func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } +func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite +// interface. +type chachapolysha256 struct{} + +func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } +func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } +func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } +func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewChachaPoly(key) +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go new file mode 100644 index 000000000..9499cdca7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import "errors" + +// counter is a 64-bit counter. +type counter struct { + val uint64 + hasOverflowed bool +} + +// newCounter creates a new counter with the initial value set to val. +func newCounter(val uint64) counter { + return counter{val: val} +} + +// value returns the current value of the counter. +func (c *counter) value() (uint64, error) { + if c.hasOverflowed { + return 0, errors.New("counter has overflowed") + } + return c.val, nil +} + +// increment increments the counter and checks for overflow. +func (c *counter) increment() { + // If the counter is already invalid due to overflow, there is no need to + // increase it. We check for the hasOverflowed flag in the call to value(). + if c.hasOverflowed { + return + } + c.val++ + if c.val == 0 { + c.hasOverflowed = true + } +} + +// reset sets the counter value to zero and sets the hasOverflowed flag to +// false. 
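+//
+// Editorial illustration (not part of the vendored file) of the overflow
+// semantics implemented by value, increment, and reset:
+//
+//	c := newCounter(^uint64(0)) // start at the maximum uint64 value
+//	c.increment()               // wraps to 0 and sets hasOverflowed
+//	_, err := c.value()         // err: "counter has overflowed"
+//	c.reset()                   // val = 0, hasOverflowed = false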
+func (c *counter) reset() {
+	c.val = 0
+	c.hasOverflowed = false
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
new file mode 100644
index 000000000..e05f2c36a
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package halfconn
+
+import (
+	"fmt"
+	"hash"
+
+	"golang.org/x/crypto/hkdf"
+)
+
+// hkdfExpander is the interface for the HKDF expansion function; see
+// https://tools.ietf.org/html/rfc5869 for details. Its use in TLS 1.3 is
+// specified in https://tools.ietf.org/html/rfc8446#section-7.2.
+type hkdfExpander interface {
+	// expand takes a secret, a label, and the output length in bytes, and
+	// returns the resulting expanded key.
+	expand(secret, label []byte, length int) ([]byte, error)
+}
+
+// defaultHKDFExpander is the default HKDF expander, which uses
+// golang.org/x/crypto/hkdf for HKDF expansion.
+type defaultHKDFExpander struct {
+	h func() hash.Hash
+}
+
+// newDefaultHKDFExpander creates an instance of the default HKDF expander
+// using the given hash function.
+func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander {
+	return &defaultHKDFExpander{h: h}
+}
+
+func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) {
+	outBuf := make([]byte, length)
+	n, err := hkdf.Expand(d.h, secret, label).Read(outBuf)
+	if err != nil {
+		return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err)
+	}
+	if n < length {
+		return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length)
+	}
+	return outBuf, nil
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
new file mode 100644
index 000000000..dff99ff59
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
@@ -0,0 +1,193 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package halfconn manages the inbound or outbound traffic of a TLS 1.3
+// connection.
+package halfconn
+
+import (
+	"fmt"
+	"sync"
+
+	s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
+	"github.com/google/s2a-go/internal/record/internal/aeadcrypter"
+	"golang.org/x/crypto/cryptobyte"
+)
+
+// The constants below were taken from Sections 7.2 and 7.3 in
+// https://tools.ietf.org/html/rfc8446#section-7. They are used as labels
+// in HKDF-Expand-Label.
+const (
+	tls13Key    = "tls13 key"
+	tls13Nonce  = "tls13 iv"
+	tls13Update = "tls13 traffic upd"
+)
+
+// S2AHalfConnection stores the state of the TLS 1.3 connection in the
+// inbound or outbound direction.
+type S2AHalfConnection struct {
+	cs       ciphersuite
+	expander hkdfExpander
+	// mutex guards sequence, aeadCrypter, trafficSecret, and nonce.
+	mutex         sync.Mutex
+	aeadCrypter   aeadcrypter.S2AAEADCrypter
+	sequence      counter
+	trafficSecret []byte
+	nonce         []byte
+}
+
+// New creates a new instance of S2AHalfConnection given a ciphersuite and a
+// traffic secret.
+func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) {
+	cs, err := newCiphersuite(ciphersuite)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite)
+	}
+	if cs.trafficSecretSize() != len(trafficSecret) {
+		return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret))
+	}
+
+	hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret}
+	if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
+		return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err)
+	}
+
+	return hc, nil
+}
+
+// Encrypt encrypts the plaintext, computes the authentication tag, and writes
+// the result to dst. dst and plaintext may fully overlap or not at all. Note
+// that the sequence number will still be incremented on failure, unless the
+// sequence has overflowed.
+func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) {
+	hc.mutex.Lock()
+	sequence, err := hc.getAndIncrementSequence()
+	if err != nil {
+		hc.mutex.Unlock()
+		return nil, err
+	}
+	nonce := hc.maskedNonce(sequence)
+	crypter := hc.aeadCrypter
+	hc.mutex.Unlock()
+	return crypter.Encrypt(dst, plaintext, nonce, aad)
+}
+
+// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may
+// fully overlap or not at all. Note that the sequence number will still be
+// incremented on failure, unless the sequence has overflowed.
+func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) {
+	hc.mutex.Lock()
+	sequence, err := hc.getAndIncrementSequence()
+	if err != nil {
+		hc.mutex.Unlock()
+		return nil, err
+	}
+	nonce := hc.maskedNonce(sequence)
+	crypter := hc.aeadCrypter
+	hc.mutex.Unlock()
+	return crypter.Decrypt(dst, ciphertext, nonce, aad)
+}
+
+// UpdateKey advances the traffic secret key, as specified in
+// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives
+// a new key and nonce, and resets the sequence number.
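+//
+// Conceptually, per RFC 8446 (the "tls13 " prefix in the constants above is
+// what HKDF-Expand-Label prepends to the label):
+//
+//	traffic_secret_N+1 = HKDF-Expand-Label(traffic_secret_N, "traffic upd", "", Hash.length)
+//
+// after which the key and nonce are re-derived from the new secret and the
+// record sequence number restarts at zero.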
+func (hc *S2AHalfConnection) UpdateKey() error {
+	hc.mutex.Lock()
+	defer hc.mutex.Unlock()
+
+	var err error
+	hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize())
+	if err != nil {
+		return fmt.Errorf("failed to derive traffic secret: %v", err)
+	}
+
+	if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
+		return fmt.Errorf("failed to update half connection: %v", err)
+	}
+
+	hc.sequence.reset()
+	return nil
+}
+
+// TagSize returns the tag size in bytes of the underlying AEAD crypter.
+func (hc *S2AHalfConnection) TagSize() int {
+	return hc.aeadCrypter.TagSize()
+}
+
+// updateCrypterAndNonce takes a new traffic secret and updates the crypter
+// and nonce. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error {
+	key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize())
+	if err != nil {
+		return fmt.Errorf("failed to update key: %v", err)
+	}
+
+	hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize())
+	if err != nil {
+		return fmt.Errorf("failed to update nonce: %v", err)
+	}
+
+	hc.aeadCrypter, err = hc.cs.aeadCrypter(key)
+	if err != nil {
+		return fmt.Errorf("failed to update AEAD crypter: %v", err)
+	}
+	return nil
+}
+
+// getAndIncrementSequence returns the current sequence number and increments
+// it. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) {
+	sequence, err := hc.sequence.value()
+	if err != nil {
+		return 0, err
+	}
+	hc.sequence.increment()
+	return sequence, nil
+}
+
+// maskedNonce creates a copy of the nonce that is masked with the sequence
+// number. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte {
+	const uint64Size = 8
+	nonce := make([]byte, len(hc.nonce))
+	copy(nonce, hc.nonce)
+	for i := 0; i < uint64Size; i++ {
+		nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i))
+	}
+	return nonce
+}
+
+// deriveSecret implements the Derive-Secret function, as specified in
+// https://tools.ietf.org/html/rfc8446#section-7.1.
+func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) {
+	var hkdfLabel cryptobyte.Builder
+	hkdfLabel.AddUint16(uint16(length))
+	hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+		b.AddBytes(label)
+	})
+	// Append an empty `Context` field to the label, as specified in the RFC.
+	// The half connection does not use the `Context` field.
+	hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+		b.AddBytes([]byte(""))
+	})
+	hkdfLabelBytes, err := hkdfLabel.Bytes()
+	if err != nil {
+		return nil, fmt.Errorf("deriveSecret failed: %v", err)
+	}
+	return hc.expander.expand(secret, hkdfLabelBytes, length)
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/record.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/record.go
new file mode 100644
index 000000000..c60515510
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/record.go
@@ -0,0 +1,757 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package record implements the TLS 1.3 record protocol used by the S2A +// transport credentials. +package record + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "sync" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/halfconn" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/grpclog" +) + +// recordType is the `ContentType` as described in +// https://tools.ietf.org/html/rfc8446#section-5.1. +type recordType byte + +const ( + alert recordType = 21 + handshake recordType = 22 + applicationData recordType = 23 +) + +// keyUpdateRequest is the `KeyUpdateRequest` as described in +// https://tools.ietf.org/html/rfc8446#section-4.6.3. +type keyUpdateRequest byte + +const ( + updateNotRequested keyUpdateRequest = 0 + updateRequested keyUpdateRequest = 1 +) + +// alertDescription is the `AlertDescription` as described in +// https://tools.ietf.org/html/rfc8446#section-6. +type alertDescription byte + +const ( + closeNotify alertDescription = 0 +) + +// sessionTicketState is used to determine whether session tickets have not yet +// been received, are in the process of being received, or have finished +// receiving. +type sessionTicketState byte + +const ( + ticketsNotYetReceived sessionTicketState = 0 + receivingTickets sessionTicketState = 1 + notReceivingTickets sessionTicketState = 2 +) + +const ( + // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, + // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from + // https://tools.ietf.org/html/rfc8446#section-5.1. + + // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext + // in a single TLS 1.3 record. + tlsRecordMaxPlaintextSize = 16384 // 2^14 + // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. + tlsRecordTypeSize = 1 + // tlsTagSize is the size in bytes of the tag of the following three + // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, + // CHACHA20-POLY1305-SHA256. + tlsTagSize = 16 + // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a + // single TLS 1.3 record. This is the maximum size of the plaintext plus the + // record type byte and 16 bytes of the tag. + tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize + // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record + // header type. + tlsRecordHeaderTypeSize = 1 + // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS + // 1.3 record header legacy record version. + tlsRecordHeaderLegacyRecordVersionSize = 2 + // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 + // record header payload length. + tlsRecordHeaderPayloadLengthSize = 2 + // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. 
+	tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize
+	// tlsRecordMaxSize is the maximum size in bytes of a single TLS 1.3
+	// record: the maximum payload size plus the record header size.
+	tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize
+	// tlsApplicationData is the application data type of the TLS 1.3 record
+	// header.
+	tlsApplicationData = 23
+	// tlsLegacyRecordVersion is the legacy record version of the TLS record.
+	tlsLegacyRecordVersion = 3
+	// tlsAlertSize is the size in bytes of an alert of TLS 1.3.
+	tlsAlertSize = 2
+)
+
+const (
+	// These are TLS 1.3 handshake-specific constants.
+
+	// tlsHandshakeNewSessionTicketType is the prefix of a handshake new session
+	// ticket message of TLS 1.3.
+	tlsHandshakeNewSessionTicketType = 4
+	// tlsHandshakeKeyUpdateType is the prefix of a handshake key update message
+	// of TLS 1.3.
+	tlsHandshakeKeyUpdateType = 24
+	// tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake
+	// message type field.
+	tlsHandshakeMsgTypeSize = 1
+	// tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake
+	// message length field.
+	tlsHandshakeLengthSize = 3
+	// tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3
+	// handshake key update message.
+	tlsHandshakeKeyUpdateMsgSize = 1
+	// tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3
+	// handshake message.
+	tlsHandshakePrefixSize = 4
+	// tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message
+	// in TLS 1.3. This is the sum of the max sizes of all the fields in the
+	// NewSessionTicket struct specified in
+	// https://tools.ietf.org/html/rfc8446#section-4.6.1.
+	tlsMaxSessionTicketSize = 131338
+)
+
+const (
+	// outBufMaxRecords is the maximum number of records that can fit in the
+	// outRecordsBuf buffer.
+	outBufMaxRecords = 16
+	// outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer.
+	outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize
+	// maxAllowedTickets is the maximum number of session tickets that are
+	// allowed. The number of tickets is limited to ensure that the size of the
+	// ticket queue does not grow indefinitely. S2A also keeps a limit on the
+	// number of tickets that it caches.
+	maxAllowedTickets = 5
+)
+
+// preConstructedKeyUpdateMsg holds the key update message. This is needed as an
+// optimization so that the same message does not need to be constructed every
+// time a key update message is sent.
+var preConstructedKeyUpdateMsg = buildKeyUpdateRequest()
+
+// conn represents a secured TLS connection. It implements the net.Conn
+// interface.
+type conn struct {
+	net.Conn
+	// inConn is the half connection responsible for decrypting incoming bytes.
+	inConn *halfconn.S2AHalfConnection
+	// outConn is the half connection responsible for encrypting outgoing bytes.
+	outConn *halfconn.S2AHalfConnection
+	// pendingApplicationData holds data that has been read from the connection
+	// and decrypted, but has not yet been returned by Read.
+	pendingApplicationData []byte
+	// unusedBuf holds data read from the network that has not yet been
+	// decrypted. This data might not consist of a complete record. It may
+	// consist of several records, the last of which could be incomplete.
+	unusedBuf []byte
+	// outRecordsBuf is a buffer used to store outgoing TLS records before
+	// they are written to the network.
+	outRecordsBuf []byte
+	// nextRecord stores the next record info in the unusedBuf buffer.
+	nextRecord []byte
+	// overheadSize is the overhead size in bytes of each TLS 1.3 record, which
+	// is computed as overheadSize = header size + record type byte + tag size.
+	// Note that there is no padding by zeros in the overhead calculation.
+	overheadSize int
+	// readMutex guards against concurrent calls to Read. This is required since
+	// Close may be called during a Read.
+	readMutex sync.Mutex
+	// writeMutex guards against concurrent calls to Write. This is required
+	// since Close may be called during a Write, and also because a key update
+	// message may be written during a Read.
+	writeMutex sync.Mutex
+	// handshakeBuf holds handshake messages while they are being processed.
+	handshakeBuf []byte
+	// ticketState is the current processing state of the session tickets.
+	ticketState sessionTicketState
+	// sessionTickets holds the completed session tickets until they are sent to
+	// the handshaker service for processing.
+	sessionTickets [][]byte
+	// ticketSender sends session tickets to the S2A handshaker service.
+	ticketSender s2aTicketSender
+	// callComplete is a channel that blocks closing the record protocol until a
+	// pending call to the S2A completes.
+	callComplete chan bool
+}
+
+// ConnParameters holds the parameters used for creating a new conn object.
+type ConnParameters struct {
+	// NetConn is the TCP connection to the peer. This parameter is required.
+	NetConn net.Conn
+	// Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker
+	// service. This parameter is required.
+	Ciphersuite commonpb.Ciphersuite
+	// TLSVersion is the TLS version number negotiated by the S2A handshaker
+	// service. This parameter is required.
+	TLSVersion commonpb.TLSVersion
+	// InTrafficSecret is the traffic secret used to derive the session key for
+	// the inbound direction. This parameter is required.
+	InTrafficSecret []byte
+	// OutTrafficSecret is the traffic secret used to derive the session key
+	// for the outbound direction. This parameter is required.
+	OutTrafficSecret []byte
+	// UnusedBuf is the data read from the network that has not yet been
+	// decrypted. This parameter is optional. If not provided, then no
+	// application data was sent in the same flight of messages as the final
+	// handshake message.
+	UnusedBuf []byte
+	// InSequence is the sequence number of the next, incoming, TLS record.
+	// This parameter is required.
+	InSequence uint64
+	// OutSequence is the sequence number of the next, outgoing, TLS record.
+	// This parameter is required.
+	OutSequence uint64
+	// HSAddr stores the address of the S2A handshaker service. This parameter
+	// is optional. If not provided, then TLS resumption is disabled.
+	HSAddr string
+	// ConnectionID is the connection identifier that was created and sent by
+	// S2A at the end of a handshake.
+	ConnectionID uint64
+	// LocalIdentity is the local identity that was used by S2A during session
+	// setup and included in the session result.
+	LocalIdentity *commonpb.Identity
+	// EnsureProcessSessionTickets allows users to wait and ensure that all
+	// available session tickets are sent to S2A before a process completes.
+	EnsureProcessSessionTickets *sync.WaitGroup
+}
+
+// NewConn creates a TLS record protocol that wraps the TCP connection.
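+//
+// A minimal usage sketch (netConn and the traffic secrets are assumed to come
+// from a completed S2A handshake; the enum names are assumed from the s2a
+// common proto):
+//
+//	tlsConn, err := NewConn(&ConnParameters{
+//		NetConn:          netConn,
+//		Ciphersuite:      commonpb.Ciphersuite_AES_128_GCM_SHA256,
+//		TLSVersion:       commonpb.TLSVersion_TLS1_3,
+//		InTrafficSecret:  inSecret,  // 32 bytes for AES-128-GCM-SHA256
+//		OutTrafficSecret: outSecret, // 32 bytes for AES-128-GCM-SHA256
+//	})
+//
+// InSequence and OutSequence are left at zero here, which matches a fresh
+// connection with no prior records in either direction.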
+func NewConn(o *ConnParameters) (net.Conn, error) { + if o == nil { + return nil, errors.New("conn options must not be nil") + } + if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { + return nil, errors.New("TLS version must be TLS 1.3") + } + + inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) + if err != nil { + return nil, fmt.Errorf("failed to create inbound half connection: %v", err) + } + outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) + if err != nil { + return nil, fmt.Errorf("failed to create outbound half connection: %v", err) + } + + // The tag size for the in/out connections should be the same. + overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() + var unusedBuf []byte + if o.UnusedBuf == nil { + // We pre-allocate unusedBuf to be of size + // 2*tlsRecordMaxSize-1 during initialization. We only read from the + // network into unusedBuf when unusedBuf does not contain a complete + // record and the incomplete record is at most tlsRecordMaxSize-1 + // (bytes). And we read at most tlsRecordMaxSize bytes of data from the + // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 + // is large enough to buffer data read from the network. + unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) + } else { + unusedBuf = make([]byte, len(o.UnusedBuf)) + copy(unusedBuf, o.UnusedBuf) + } + + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + + s2aConn := &conn{ + Conn: o.NetConn, + inConn: inConn, + outConn: outConn, + unusedBuf: unusedBuf, + outRecordsBuf: make([]byte, tlsRecordMaxSize), + nextRecord: unusedBuf, + overheadSize: overheadSize, + ticketState: ticketsNotYetReceived, + // Pre-allocate the buffer for one session ticket message and the max + // plaintext size. This is the largest size that handshakeBuf will need + // to hold. The largest incomplete handshake message is the + // [handshake header size] + [max session ticket size] - 1. + // Then, tlsRecordMaxPlaintextSize is the maximum size that will be + // appended to the handshakeBuf before the handshake message is + // completed. Therefore, the buffer size below should be large enough to + // buffer any handshake messages. + handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), + ticketSender: &ticketSender{ + hsAddr: o.HSAddr, + connectionID: o.ConnectionID, + localIdentity: o.LocalIdentity, + tokenManager: tokenManager, + ensureProcessSessionTickets: o.EnsureProcessSessionTickets, + }, + callComplete: make(chan bool), + } + return s2aConn, nil +} + +// Read reads and decrypts a TLS 1.3 record from the underlying connection, and +// copies any application data received from the peer into b. If the size of the +// payload is greater than len(b), Read retains the remaining bytes in an +// internal buffer, and subsequent calls to Read will read from this buffer +// until it is exhausted. At most 1 TLS record worth of application data is +// written to b for each call to Read. +// +// Note that for the user to efficiently call this method, the user should +// ensure that the buffer b is allocated such that the buffer does not have any +// unused segments. This can be done by calling Read via io.ReadFull, which +// continually calls Read until the specified buffer has been filled. 
Also note
+// that the user should close the connection via Close() if an error is
+// returned by a call to Read.
+func (p *conn) Read(b []byte) (n int, err error) {
+	p.readMutex.Lock()
+	defer p.readMutex.Unlock()
+	// Check if p.pendingApplicationData has leftover application data from
+	// the previous call to Read.
+	if len(p.pendingApplicationData) == 0 {
+		// Read a full record from the wire.
+		record, err := p.readFullRecord()
+		if err != nil {
+			return 0, err
+		}
+		// Now we have a complete record, so split the header and validate it.
+		// The TLS record is split into 2 pieces: the record header and the
+		// payload. The payload has the following form:
+		// [payload] = [ciphertext of application data]
+		// + [ciphertext of record type byte]
+		// + [(optionally) ciphertext of padding by zeros]
+		// + [tag]
+		header, payload, err := splitAndValidateHeader(record)
+		if err != nil {
+			return 0, err
+		}
+		// Decrypt the ciphertext.
+		p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header)
+		if err != nil {
+			return 0, err
+		}
+		// Remove the padding by zeros and the record type byte from the
+		// p.pendingApplicationData buffer.
+		msgType, err := p.stripPaddingAndType()
+		if err != nil {
+			return 0, err
+		}
+		// Check that the length of the plaintext after stripping the padding
+		// and record type byte is under the maximum plaintext size.
+		if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize {
+			return 0, errors.New("plaintext size larger than maximum")
+		}
+		// The expected message types are application data, alert, and
+		// handshake. For application data, the bytes are directly copied into
+		// b. For an alert, the type of the alert is checked and the connection
+		// is closed on a close notify alert. For a handshake message, the
+		// handshake message type is checked. The handshake message type can be
+		// a key update type, for which we advance the traffic secret, and a
+		// new session ticket type, for which we send the received ticket to S2A
+		// for processing.
+		switch msgType {
+		case applicationData:
+			if len(p.handshakeBuf) > 0 {
+				return 0, errors.New("application data received while processing fragmented handshake messages")
+			}
+			if p.ticketState == receivingTickets {
+				p.ticketState = notReceivingTickets
+				grpclog.Infof("Sending session tickets to S2A.")
+				p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
+			}
+		case alert:
+			return 0, p.handleAlertMessage()
+		case handshake:
+			if err = p.handleHandshakeMessage(); err != nil {
+				return 0, err
+			}
+			return 0, nil
+		default:
+			return 0, errors.New("unknown record type")
+		}
+	}
+	// Write as much application data as possible to b, the output buffer.
+	n = copy(b, p.pendingApplicationData)
+	p.pendingApplicationData = p.pendingApplicationData[n:]
+	return n, nil
+}
+
+// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a
+// TLS 1.3 record (of type "application data") from each segment, and sends
+// the record to the peer. It returns the number of plaintext bytes that were
+// successfully sent to the peer.
+func (p *conn) Write(b []byte) (n int, err error) {
+	p.writeMutex.Lock()
+	defer p.writeMutex.Unlock()
+	return p.writeTLSRecord(b, tlsApplicationData)
+}
+
+// writeTLSRecord divides b into segments of size tlsRecordMaxPlaintextSize,
+// builds a TLS 1.3 record (of type recordType) from each segment, and sends
+// the record to the peer. It returns the number of plaintext bytes that were
+// successfully sent to the peer.
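+//
+// For example, with the constants above: writing 40,000 plaintext bytes
+// yields ceil(40000/16384) = 3 records, and with an overhead of 5 (header) +
+// 1 (record type) + 16 (tag) = 22 bytes per record, 40,000 + 3*22 = 40,066
+// bytes go on the wire.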
+func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { + // Create a record of only header, record type, and tag if given empty + // byte array. + if len(b) == 0 { + recordEndIndex, _, err := p.buildRecord(b, recordType, 0) + if err != nil { + return 0, err + } + + // Write the bytes stored in outRecordsBuf to p.Conn. Since we return + // the number of plaintext bytes written without overhead, we will + // always return 0 while p.Conn.Write returns the entire record length. + _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + return 0, err + } + + numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) + totalRecordsSize := len(b) + numRecords*p.overheadSize + partialBSize := len(b) + if totalRecordsSize > outBufMaxSize { + totalRecordsSize = outBufMaxSize + partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize + } + if len(p.outRecordsBuf) < totalRecordsSize { + p.outRecordsBuf = make([]byte, totalRecordsSize) + } + for bStart := 0; bStart < len(b); bStart += partialBSize { + bEnd := bStart + partialBSize + if bEnd > len(b) { + bEnd = len(b) + } + partialB := b[bStart:bEnd] + recordEndIndex := 0 + for len(partialB) > 0 { + recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) + if err != nil { + // Return the amount of bytes written prior to the error. + return bStart, err + } + } + // Write the bytes stored in outRecordsBuf to p.Conn. If there is an + // error, calculate the total number of plaintext bytes of complete + // records successfully written to the peer and return it. + nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + if err != nil { + numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) + return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err + } + } + return len(b), nil +} + +// buildRecord builds a TLS 1.3 record of type recordType from plaintext, +// and writes the record to outRecordsBuf at recordStartIndex. The record will +// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the +// index of outRecordsBuf where the current record ends, as well as any +// remaining plaintext bytes. +func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { + // Construct the payload, which consists of application data and record type. + dataLen := len(plaintext) + if dataLen > tlsRecordMaxPlaintextSize { + dataLen = tlsRecordMaxPlaintextSize + } + remainingPlaintext = plaintext[dataLen:] + newRecordBuf := p.outRecordsBuf[recordStartIndex:] + + copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) + newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType + payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. + // Construct the header. + newRecordBuf[0] = tlsApplicationData + newRecordBuf[1] = tlsLegacyRecordVersion + newRecordBuf[2] = tlsLegacyRecordVersion + binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) + header := newRecordBuf[:tlsRecordHeaderSize] + + // Encrypt the payload using header as aad. 
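+	// In TLS 1.3 the additional authenticated data is exactly this 5-byte
+	// record header (opaque_type, legacy_record_version, length); see
+	// https://tools.ietf.org/html/rfc8446#section-5.2. Authenticating the
+	// header binds the ciphertext to its on-the-wire framing without
+	// encrypting it.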
+ encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) + if err != nil { + return 0, plaintext, err + } + recordStartIndex += len(header) + len(encryptedPayload) + return recordStartIndex, remainingPlaintext, nil +} + +func (p *conn) Close() error { + p.readMutex.Lock() + defer p.readMutex.Unlock() + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + // If p.ticketState is equal to notReceivingTickets, then S2A has + // been sent a flight of session tickets, and we must wait for the + // call to S2A to complete before closing the record protocol. + if p.ticketState == notReceivingTickets { + <-p.callComplete + grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") + } + return p.Conn.Close() +} + +// stripPaddingAndType strips the padding by zeros and record type from +// p.pendingApplicationData and returns the record type. Note that +// p.pendingApplicationData should be of the form: +// [application data] + [record type byte] + [trailing zeros] +func (p *conn) stripPaddingAndType() (recordType, error) { + if len(p.pendingApplicationData) == 0 { + return 0, errors.New("application data had length 0") + } + i := len(p.pendingApplicationData) - 1 + // Search for the index of the record type byte. + for i > 0 { + if p.pendingApplicationData[i] != 0 { + break + } + i-- + } + rt := recordType(p.pendingApplicationData[i]) + p.pendingApplicationData = p.pendingApplicationData[:i] + return rt, nil +} + +// readFullRecord reads from the wire until a record is completed and returns +// the full record. +func (p *conn) readFullRecord() (fullRecord []byte, err error) { + fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + // Check whether the next record to be decrypted has been completely + // received. + if len(fullRecord) == 0 { + copy(p.unusedBuf, p.nextRecord) + p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] + // Always copy next incomplete record to the beginning of the + // unusedBuf buffer and reset nextRecord to it. + p.nextRecord = p.unusedBuf + } + // Keep reading from the wire until we have a complete record. + for len(fullRecord) == 0 { + if len(p.unusedBuf) == cap(p.unusedBuf) { + tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) + copy(tmp, p.unusedBuf) + p.unusedBuf = tmp + } + n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) + if err != nil { + return nil, err + } + p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] + fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + } + return fullRecord, nil +} + +// parseReadBuffer parses the provided buffer and returns a full record and any +// remaining bytes in that buffer. If the record is incomplete, nil is returned +// for the first return value and the given byte buffer is returned for the +// second return value. The length of the payload specified by the header should +// not be greater than maxLen, otherwise an error is returned. Note that this +// function does not allocate or copy any buffers. +func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { + // If the header is not complete, return the provided buffer as remaining + // buffer. 
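+	// For reference, the tlsRecordHeaderSize (5) bytes checked below are
+	// laid out as:
+	//
+	//	byte 0     content type (always 23, application data, here)
+	//	bytes 1-2  legacy record version (0x03 0x03)
+	//	bytes 3-4  payload length, big-endian uint16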
+	if len(b) < tlsRecordHeaderSize {
+		return nil, b, nil
+	}
+	msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize]
+	length := binary.BigEndian.Uint16(msgLenField)
+	if length > maxLen {
+		return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen)
+	}
+	if len(b) < int(length)+tlsRecordHeaderSize {
+		// Record is not complete yet.
+		return nil, b, nil
+	}
+	return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil
+}
+
+// splitAndValidateHeader splits the header from the payload in the TLS 1.3
+// record and returns them. Note that the header is checked for validity, and an
+// error is returned when an invalid header is parsed. Also note that this
+// function does not allocate or copy any buffers.
+func splitAndValidateHeader(record []byte) (header, payload []byte, err error) {
+	if len(record) < tlsRecordHeaderSize {
+		return nil, nil, fmt.Errorf("record was smaller than the header size")
+	}
+	header = record[:tlsRecordHeaderSize]
+	payload = record[tlsRecordHeaderSize:]
+	if header[0] != tlsApplicationData {
+		return nil, nil, fmt.Errorf("incorrect type in the header")
+	}
+	// Check the legacy record version, which should be 0x03, 0x03.
+	if header[1] != 0x03 || header[2] != 0x03 {
+		return nil, nil, fmt.Errorf("incorrect legacy record version in the header")
+	}
+	return header, payload, nil
+}
+
+// handleAlertMessage handles an alert message.
+func (p *conn) handleAlertMessage() error {
+	if len(p.pendingApplicationData) != tlsAlertSize {
+		return errors.New("invalid alert message size")
+	}
+	alertType := p.pendingApplicationData[1]
+	// Clear the body of the alert message.
+	p.pendingApplicationData = p.pendingApplicationData[:0]
+	if alertType == byte(closeNotify) {
+		return errors.New("received a close notify alert")
+	}
+	// TODO(matthewstevenson88): Add support for more alert types.
+	return fmt.Errorf("received an unrecognized alert type: %v", alertType)
+}
+
+// parseHandshakeMsg parses a handshake message from the handshake buffer.
+// It returns the message type, the message length, the message, the raw message
+// that includes the type and length bytes, and a flag indicating whether the
+// handshake message has been fully parsed, i.e. whether the entire handshake
+// message was in the handshake buffer.
+func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) {
+	// Handle the case where the 4 byte handshake header is fragmented.
+	if len(p.handshakeBuf) < tlsHandshakePrefixSize {
+		return 0, 0, nil, nil, false
+	}
+	msgType = p.handshakeBuf[0]
+	msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize])
+	if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) {
+		return 0, 0, nil, nil, false
+	}
+	msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen]
+	rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen]
+	p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:]
+	return msgType, msgLen, msg, rawMsg, true
+}
+
+// handleHandshakeMessage handles a handshake message. Note that the first
+// complete handshake message from the handshake buffer is removed, if it
+// exists.
+func (p *conn) handleHandshakeMessage() error {
+	// Copy the pending application data to the handshake buffer. At this point,
+	// we are guaranteed that the pending application data contains only parts
+	// of a handshake message.
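+	// Each handshake message in the buffer is framed as 1 byte of message
+	// type followed by a 3-byte big-endian length and then the body; a
+	// 1-byte KeyUpdate, for example, arrives as the 5 bytes
+	// {24, 0, 0, 1, key_update_request}.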
+	p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...)
+	p.pendingApplicationData = p.pendingApplicationData[:0]
+	// Several handshake messages may be coalesced into a single record.
+	// Continue reading them until the handshake buffer is empty.
+	for len(p.handshakeBuf) > 0 {
+		handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg()
+		if !ok {
+			// The handshake could not be fully parsed, so read in another
+			// record and try again later.
+			break
+		}
+		switch handshakeMsgType {
+		case tlsHandshakeKeyUpdateType:
+			if msgLen != tlsHandshakeKeyUpdateMsgSize {
+				return errors.New("invalid handshake key update message length")
+			}
+			if len(p.handshakeBuf) != 0 {
+				return errors.New("key update message must be the last message of a handshake record")
+			}
+			if err := p.handleKeyUpdateMsg(msg); err != nil {
+				return err
+			}
+		case tlsHandshakeNewSessionTicketType:
+			// Ignore tickets that are received after a batch of tickets has
+			// been sent to S2A.
+			if p.ticketState == notReceivingTickets {
+				continue
+			}
+			if p.ticketState == ticketsNotYetReceived {
+				p.ticketState = receivingTickets
+			}
+			p.sessionTickets = append(p.sessionTickets, rawMsg)
+			if len(p.sessionTickets) == maxAllowedTickets {
+				p.ticketState = notReceivingTickets
+				grpclog.Infof("Sending session tickets to S2A.")
+				p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
+			}
+		default:
+			return errors.New("unknown handshake message type")
+		}
+	}
+	return nil
+}
+
+func buildKeyUpdateRequest() []byte {
+	b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize)
+	b[0] = tlsHandshakeKeyUpdateType
+	b[1] = 0
+	b[2] = 0
+	b[3] = tlsHandshakeKeyUpdateMsgSize
+	b[4] = byte(updateNotRequested)
+	return b
+}
+
+// handleKeyUpdateMsg handles a key update message.
+func (p *conn) handleKeyUpdateMsg(msg []byte) error {
+	keyUpdateRequest := msg[0]
+	if keyUpdateRequest != byte(updateNotRequested) &&
+		keyUpdateRequest != byte(updateRequested) {
+		return errors.New("invalid handshake key update message")
+	}
+	if err := p.inConn.UpdateKey(); err != nil {
+		return err
+	}
+	// Send a key update message back to the peer if requested.
+	if keyUpdateRequest == byte(updateRequested) {
+		p.writeMutex.Lock()
+		defer p.writeMutex.Unlock()
+		n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake))
+		if err != nil {
+			return err
+		}
+		if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize {
+			return errors.New("key update request message wrote less bytes than expected")
+		}
+		if err = p.outConn.UpdateKey(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// bigEndianInt24 takes the given byte buffer of at least size 3 and outputs
+// the resulting 24-bit integer as a uint32. This is needed because TLS 1.3
+// requires 3 byte integers, and the encoding/binary package does not
+// provide a way to transform a byte buffer into a 3 byte integer.
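+//
+// For example, bigEndianInt24([]byte{0x01, 0x02, 0x03}) returns 0x010203
+// (66051), i.e. 0x01<<16 | 0x02<<8 | 0x03.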
+func bigEndianInt24(b []byte) uint32 { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/event-exporter/vendor/github.com/google/s2a-go/internal/record/ticketsender.go new file mode 100644 index 000000000..e51199ab3 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package record + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/s2a-go/internal/handshaker/service" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +// sessionTimeout is the timeout for creating a session with the S2A handshaker +// service. +const sessionTimeout = time.Second * 5 + +// s2aTicketSender sends session tickets to the S2A handshaker service. +type s2aTicketSender interface { + // sendTicketsToS2A sends the given session tickets to the S2A handshaker + // service. + sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) +} + +// ticketStream is the stream used to send and receive session information. +type ticketStream interface { + Send(*s2apb.SessionReq) error + Recv() (*s2apb.SessionResp, error) +} + +type ticketSender struct { + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // connectionID is the connection identifier that was created and sent by + // S2A at the end of a handshake. + connectionID uint64 + // localIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + localIdentity *commonpb.Identity + // tokenManager manages access tokens for authenticating to S2A. + tokenManager tokenmanager.AccessTokenManager + // ensureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + ensureProcessSessionTickets *sync.WaitGroup +} + +// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker +// service. This is done asynchronously and writes to the error logs if an error +// occurs. +func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { + // Note that the goroutine is in the function rather than at the caller + // because the fake ticket sender used for testing must run synchronously + // so that the session tickets can be accessed from it after the tests have + // been run. 
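+	//
+	// A caller-side sketch of the flush mechanism (wg reaches this code via
+	// ConnParameters.EnsureProcessSessionTickets; the surrounding setup is
+	// assumed):
+	//
+	//	var wg sync.WaitGroup
+	//	// ... create the record protocol with EnsureProcessSessionTickets: &wg ...
+	//	wg.Wait() // blocks until queued tickets have been handed to S2A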
+ if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Add(1) + } + go func() { + if err := func() error { + defer func() { + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Done() + } + }() + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + // The transportCreds only needs to be set when talking to S2AV2 and also + // if mTLS is required. + hsConn, err := service.Dial(ctx, t.hsAddr, nil) + if err != nil { + return err + } + client := s2apb.NewS2AServiceClient(hsConn) + session, err := client.SetUpSession(ctx) + if err != nil { + return err + } + defer func() { + if err := session.CloseSend(); err != nil { + grpclog.Error(err) + } + }() + return t.writeTicketsToStream(session, sessionTickets) + }(); err != nil { + grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", + t.localIdentity, err) + } + callComplete <- true + close(callComplete) + }() +} + +// writeTicketsToStream writes the given session tickets to the given stream. +func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { + if err := stream.Send( + &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ResumptionTicket{ + ResumptionTicket: &s2apb.ResumptionTicketReq{ + InBytes: sessionTickets, + ConnectionId: t.connectionID, + LocalIdentity: t.localIdentity, + }, + }, + AuthMechanisms: t.getAuthMechanisms(), + }, + ); err != nil { + return err + } + sessionResp, err := stream.Recv() + if err != nil { + return err + } + if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { + return fmt.Errorf("s2a session ticket response had error status: %v, %v", + sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) + } + return nil +} + +func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if t.tokenManager == nil { + return nil + } + // First handle the special case when no local identity has been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if t.localIdentity == nil { + token, err := t.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has specified + // a local identity. + token, err := t.tokenManager.Token(t.localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + Identity: t.localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/event-exporter/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go new file mode 100644 index 000000000..ec96ba3b6 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tokenmanager provides tokens for authenticating to S2A. +package tokenmanager + +import ( + "fmt" + "os" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +const ( + s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" +) + +// AccessTokenManager manages tokens for authenticating to S2A. +type AccessTokenManager interface { + // DefaultToken returns a token that an application with no specified local + // identity must use to authenticate to S2A. + DefaultToken() (token string, err error) + // Token returns a token that an application with local identity equal to + // identity must use to authenticate to S2A. + Token(identity *commonpb.Identity) (token string, err error) +} + +type singleTokenAccessTokenManager struct { + token string +} + +// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance +// that will always manage the same token. +// +// The token to be managed is read from the s2aAccessTokenEnvironmentVariable +// environment variable. If this environment variable is not set, then this +// function returns an error. +func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { + token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) + if !variableExists { + return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) + } + return &singleTokenAccessTokenManager{token: token}, nil +} + +// DefaultToken always returns the token managed by the +// singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { + return m.token, nil +} + +// Token always returns the token managed by the singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { + return m.token, nil +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/README.md b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/README.md new file mode 100644 index 000000000..3806d1e9c --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/README.md @@ -0,0 +1 @@ +**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go new file mode 100644 index 000000000..cc811879b --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certverifier offloads verifications to S2Av2. +package certverifier + +import ( + "crypto/x509" + "fmt" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ + ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ + CertificateChain: rawCerts, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} + +// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ + ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ + CertificateChain: rawCerts, + ServerHostname: hostname, + SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") + return err + } + + // Get the response from S2Av2. 
+ resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..958f3cfaddf3645fa6c0578b5b6955d65ac4c172 GIT binary patch literal 998 zcmXqLVt!=M#B^!_GZP~dlZfxTkgMw#Mx5CteAhsdS=K*t&xAY!UN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Gec7&Lt`^z^C)p%6J+ina%mHz5^^vyvNA9?G4eAQ zG%<29H8CnfQWlH+MyjTjz z2Bjk+Ub9TzT(Z$!`+w1f9a{SYn?h^$U$mDJKK^KOurlxGlhf8coXvjW^}bgt-rFjD znHJ|Xo9Wew;}%JlW_Kjsthc+chb^XXpU3LOdy_KCS4OTntgUFd?YsGkV{8gPCrIUU z7xZ1<|6Oe5g44&3J$ODp>G5YKW=00a#V!U;2J*mEAgjzGVIbBZvce&_^W7!+r|T*d zavIkkZ(n>dQ`LY6q(GR3)qt6i@xOr}h$qOxWx&zImXe>Fn2DZTf#J-^uui3sYst!# z{ImS~I{#0uS#WDz%!i-3yIxOGkACKhN$DyM2L{LbrkL@xJCpY3crHP)J~LHEx;nsZlX$z1Na++S~*x&F8L zSHsIX`Pf;-7fz4f$yxWfRV}c$Te2$X#22gkrUlwjtNp9{IiD}~zjo}z)1J=iJoj~z zPFk$n)~eK8B%5+0@w1g$N8H(l+-H4!LOMmyS_+%b`$^TD`}g=z?!9=Gts-l=GE2@y HTP6bly#{pd literal 0 HcmV?d00001 diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..d2817641bafb022339926786ab85b545f40ac665 GIT binary patch literal 1147 zcmXqLVktLhVvb+H%*4pVB*OPMYW498DSV5WgVNVW|Jr8oCgimNFB_*;n@8JsUPeZ4 zRtAH{c0+ChPB!LH7B*p~&|nycgNMo4(NM@h03^fC!x>zfmseSqn3HNKV890w6yxnyM_06xZQcZ?W@M4y!->Y40>Qzg1y0 z&gNQpqF{|c>D+ZZ(rFIKEL_n{%!~|-iyLnkG+s4m+z3nvvdS!tD-9Z#Eo|&v(%3b4 zVzNoZ_g%=<^$R1;>=M3fAjvH2AGv2jo&gWY$-*qG2F#3%{|y8|JV6#N1CA!Pl>FSp z%sk}C2j(GQ()!XwH^ngbB~t_{rh@w&h5|n>_#Tt3odjy zuPCdY_u)fF^#8=jc^^+Q{aqN5`$@Gm({I-q6VU_LtQOD6naL>j(`V|D&;QI6%^$Dc z@$uH$=blqSKTV1FFaEpmVP=>@vvi*Y`l|mqO|ORNA0oIS8nOI z?NiA8%5=ljL@D29-OKpueEYz|zv5rHb554&taa#n7142tpUKWU?R-O_r^0)6j>G3Y aOB;Xxy<$H9NT-Uzt(mu6i*GEf3IzZ)c%0q< literal 0 HcmV?d00001 diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..d8c3710c85f9ff41ddfc709924c866350a727a4f GIT binary patch literal 1013 zcmXqLV*Y5*#B_53GZP~dlZa+?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C z&5N-8^H(H$?psgMz!}^;s& z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) zTR0VF*I2Sr HHQW{e5x{wf literal 0 HcmV?d00001 diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..dae619c097512f20d09d2054c63fc0f715d7be24 GIT binary patch literal 998 zcmXqLVt!=M#B^!_GZP~dlZZk6mxvepr#-!NVoqOL*`yn*LOZ7#@Un4gwRyCC=VfH% zW@RvFY&YaK;ACSCWnmL$3Jr#FICz+x9SwyH1VA$EJeHTH8JZfJT11KSnjmurkxQExm5_sxk(GhDiIJbd zpox)-sfm%1VO{t`EzVyWX$3+}`kJnrGQY>^=jnZyz4d%;|IQeuB_Gcfo1a{_>B5Fg zXYbI=4K*7QZdjc*51-)~EjRzyir2SPA5Z@zcTLeXV*g^*2f0P(7p>c z?c<>#uh>H(ryKBq6bQ4h8Za|5{x=W=@dR183^XGxLzc8JJvw;mpWjp+5Pu z!*lq#5Sb^i@`a>klJNjzx%iu)2*?h3yy-gEL(;p^g~mZo#n!wzh9Qua5C+wfRd ztarh_l9E{Oro7&WNVTl@3-~vy&EDU;)-f|r-8<&mQ>C=;``&D2+5V0(pW(g3o1}Sj z^gY793Mi&+ntK1`1tm%ENt=U{rb%k1PtbRHEhYTylqR?8HqOeuvA1fBY>pc~NUFTR KZE!sDzAgX``fm0B literal 0 HcmV?d00001 diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..ce7f8d31d6802c7e68c188af8797c3a063894857 GIT binary patch literal 1147 zcmXqLVktLhVvb+H%*4pVB%&$)xidhT`$ARBi)~L`p2_`@+!*`b5acj4ER7|Ts-W~ z`MCv&d6kBO2K*oqZXOPo%(Bel%=|nTUzhK>3dC_Z(le%bCn$9moio9jI@8{ zd;5%c{XT!(ig$;5K+$bpwI6ChVmse+UV1r`!RY5*U#A%ppFJxS>PX#w$sz7*X_R~A zg!|$g&z66XI$)aW-TrxDio@fp!U^sS#+*GdHMB|H?BTf!^g~J_=D$Y-Ot{~xpTg~G+{k6 Zdr{DieczoQDL-a%Ib3(Tsv+bGCjf+Ap-TV& literal 0 HcmV?d00001 diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der new file mode 100644 index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32 GIT binary patch literal 1013 zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk zCap+`USuL4V}qf=6kBV_EFAN zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
new file mode 100644
index 000000000..493a5a264
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
+BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
+YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
+AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
+MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
+BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
+ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
+KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
+a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
+OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
+RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
+P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
+HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
+0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
+EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
+/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
+QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
+nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
+X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
+pKklVz0=
+-----END CERTIFICATE-----
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
new file mode 100644
index 000000000..55a7f10c7
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
+l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
+4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
+xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
+68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
+/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
+Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
+VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
+9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
+MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
+aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
+xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
+2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
+EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
+aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
+udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
+VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
+56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
+GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
+Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
+HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
+BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
+qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
+GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
+-----END RSA PRIVATE KEY-----
diff --git a/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/event-exporter/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der
new file mode 100644
index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32
GIT binary patch
literal 1013
[base85-encoded binary data omitted]
[...]
+	if len(tlsConfig.CertificateChain) > 0 {
+		cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream)
+		if cert.PrivateKey == nil {
+			return nil, errors.New("failed to retrieve Private Key from Remote Signer Library")
+		}
+	}
+
+	minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create mTLS credentials for client.
+	config := &tls.Config{
+		VerifyPeerCertificate:  certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy),
+		ServerName:             serverHostname,
+		InsecureSkipVerify:     true, // NOLINT
+		ClientSessionCache:     nil,
+		SessionTicketsDisabled: true,
+		MinVersion:             minVersion,
+		MaxVersion:             maxVersion,
+		NextProtos:             []string{h2},
+	}
+	if len(tlsConfig.CertificateChain) > 0 {
+		config.Certificates = []tls.Certificate{cert}
+	}
+	return config, nil
+}
+
+// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application.
+func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) {
+	return &tls.Config{
+		GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream),
+	}, nil
+}
+
+// ClientConfig builds a TLS config for a server to establish a secure
+// connection with a client, based on SNI communicated during ClientHello.
+// Ensures that the server presents the correct certificate to establish a TLS
+// connection.
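// A hedged sketch of server-side consumption within this (internal) package;
// the stream, token manager, identities, and verification mode are assumed to
// be established elsewhere:
//
//	cfg, _ := GetTLSConfigurationForServer(s2AStream, tokenManager, localIdentities, verificationMode)
//	ln, _ := tls.Listen("tcp", ":8443", cfg) // address illustrative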
+func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) + if err != nil { + return nil, err + } + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. + block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) + if err != nil { + return nil, err + } + + clientAuth := getTLSClientAuthType(tlsConfig) + + var cipherSuites []uint16 + cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) + + // Create mTLS credentials for server. + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), + ClientAuth: clientAuth, + CipherSuites: cipherSuites, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + }, nil + } +} + +func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { + var tlsGoCipherSuites []uint16 + for _, v := range tlsConfigCipherSuites { + s := getTLSCipherSuite(v) + if s != 0xffff { + tlsGoCipherSuites = append(tlsGoCipherSuites, s) + } + } + return tlsGoCipherSuites +} + +func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { + switch tlsCipherSuite { + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + default: + return 0xffff + } +} + +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { + authMechanisms := getAuthMechanisms(tokenManager, localIdentities) + var locID *commonpbv1.Identity + if localIdentities != nil { + locID = localIdentities[0] + } + + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: locID, + AuthenticationMechanisms: authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: 
&s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, + Sni: sni, + }, + }, + }); err != nil { + return nil, err + } + + resp, err := s2AStream.Recv() + if err != nil { + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil +} + +func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { + var clientAuth tls.ClientAuthType + switch x := tlsConfig.RequestClientCertificate; x { + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: + clientAuth = tls.NoClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequestClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.VerifyClientCertIfGiven. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.RequireAndVerifyClientCert. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. 
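	// (A hedged note on the semantics: with tls.RequireAnyClientCert the
	// standard library only checks that a client certificate was presented and
	// performs no chain verification, so the verifiedChains argument handed to
	// VerifyPeerCertificate is empty and validation rests entirely on the S2A
	// callback installed above.)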
+ clientAuth = tls.RequireAnyClientCert + default: + clientAuth = tls.RequireAnyClientCert + } + return clientAuth +} + +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { + if tokenManager == nil { + return nil + } + if len(localIdentities) == 0 { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get token for empty local identity: %v", err) + return nil + } + return []*s2av2pb.AuthenticationMechanism{ + { + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + var authMechanisms []*s2av2pb.AuthenticationMechanism + for _, localIdentity := range localIdentities { + if localIdentity == nil { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } else { + token, err := tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } + } + return authMechanisms +} + +// TODO(rmehta19): refactor switch statements into a helper function. +func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. + var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} + +func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. 
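	// A hedged sketch of the helper the TODO above suggests (illustrative, not
	// part of the upstream file); both the Min and Max switches could delegate
	// to it:
	//
	//	func toTLSVersion(v commonpb.TLSVersion) (uint16, error) {
	//		switch v {
	//		case commonpb.TLSVersion_TLS_VERSION_1_0:
	//			return tls.VersionTLS10, nil
	//		case commonpb.TLSVersion_TLS_VERSION_1_1:
	//			return tls.VersionTLS11, nil
	//		case commonpb.TLSVersion_TLS_VERSION_1_2:
	//			return tls.VersionTLS12, nil
	//		case commonpb.TLSVersion_TLS_VERSION_1_3:
	//			return tls.VersionTLS13, nil
	//		default:
	//			return 0, fmt.Errorf("S2Av2 provided invalid TLS version: %v", v)
	//		}
	//	}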
+	var minVersion uint16
+	var maxVersion uint16
+	switch x := tlsConfig.MinTlsVersion; x {
+	case commonpb.TLSVersion_TLS_VERSION_1_0:
+		minVersion = tls.VersionTLS10
+	case commonpb.TLSVersion_TLS_VERSION_1_1:
+		minVersion = tls.VersionTLS11
+	case commonpb.TLSVersion_TLS_VERSION_1_2:
+		minVersion = tls.VersionTLS12
+	case commonpb.TLSVersion_TLS_VERSION_1_3:
+		minVersion = tls.VersionTLS13
+	default:
+		return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x)
+	}
+
+	switch x := tlsConfig.MaxTlsVersion; x {
+	case commonpb.TLSVersion_TLS_VERSION_1_0:
+		maxVersion = tls.VersionTLS10
+	case commonpb.TLSVersion_TLS_VERSION_1_1:
+		maxVersion = tls.VersionTLS11
+	case commonpb.TLSVersion_TLS_VERSION_1_2:
+		maxVersion = tls.VersionTLS12
+	case commonpb.TLSVersion_TLS_VERSION_1_3:
+		maxVersion = tls.VersionTLS13
+	default:
+		return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x)
+	}
+	if minVersion > maxVersion {
+		return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion")
+	}
+	return minVersion, maxVersion, nil
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/retry/retry.go b/event-exporter/vendor/github.com/google/s2a-go/retry/retry.go
new file mode 100644
index 000000000..f7e0a2377
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/retry/retry.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package retry provides a retry helper for talking to the S2A gRPC server.
+// The implementation is modeled after
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go
+package retry
+
+import (
+	"context"
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	maxRetryAttempts = 5
+	maxRetryForLoops = 10
+)
+
+type defaultBackoff struct {
+	max time.Duration
+	mul float64
+	cur time.Duration
+}
+
+// Pause returns a duration, which is used as the backoff wait time
+// before the next retry.
+func (b *defaultBackoff) Pause() time.Duration {
+	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
+	b.cur = time.Duration(float64(b.cur) * b.mul)
+	if b.cur > b.max {
+		b.cur = b.max
+	}
+	return d
+}
+
+// Sleep will wait for the specified duration or return on context
+// expiration.
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+// NewRetryer creates an instance of S2ARetryer using the defaultBackoff
+// implementation.
+var NewRetryer = func() *S2ARetryer {
+	return &S2ARetryer{bo: &defaultBackoff{
+		cur: 100 * time.Millisecond,
+		max: 30 * time.Second,
+		mul: 2,
+	}}
+}
+
+type backoff interface {
+	Pause() time.Duration
+}
+
+// S2ARetryer implements a retry helper for talking to the S2A gRPC server.
+type S2ARetryer struct {
+	bo       backoff
+	attempts int
+}
+
+// Attempts returns the number of retries attempted.
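// A hedged sketch of driving retries with the Run helper defined below (names
// illustrative):
//
//	var conn net.Conn
//	retry.Run(ctx, func() error {
//		var dialErr error
//		conn, dialErr = (&net.Dialer{}).DialContext(ctx, "tcp", addr)
//		return dialErr
//	})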
+func (r *S2ARetryer) Attempts() int { + return r.attempts +} + +// Retry returns a boolean indicating whether retry should be performed +// and the backoff duration. +func (r *S2ARetryer) Retry(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + if r.attempts >= maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +// Run uses S2ARetryer to execute the function passed in, until success or reaching +// max number of retry attempts. +func Run(ctx context.Context, f func() error) { + retryer := NewRetryer() + forLoopCnt := 0 + var err error + for { + err = f() + if bo, shouldRetry := retryer.Retry(err); shouldRetry { + if grpclog.V(1) { + grpclog.Infof("will attempt retry: %v", err) + } + if ctx.Err() != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to context error: %v", ctx.Err()) + } + break + } + if errSleep := Sleep(ctx, bo); errSleep != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to sleep error: %v", errSleep) + } + break + } + // This shouldn't happen, just make sure we are not stuck in the for loops. + forLoopCnt++ + if forLoopCnt > maxRetryForLoops { + if grpclog.V(1) { + grpclog.Infof("exit the for loop after too many retries") + } + break + } + continue + } + if grpclog.V(1) { + grpclog.Infof("retry conditions not met, exit the loop") + } + break + } +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/s2a.go b/event-exporter/vendor/github.com/google/s2a-go/s2a.go new file mode 100644 index 000000000..5ecb06f93 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/s2a.go @@ -0,0 +1,427 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package s2a provides the S2A transport credentials used by a gRPC +// application. +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2" + "github.com/google/s2a-go/retry" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + // defaultTimeout specifies the default server handshake timeout. + defaultTimeout = 30.0 * time.Second +) + +// s2aTransportCreds are the transport credentials required for establishing +// a secure connection using the S2A. They implement the +// credentials.TransportCredentials interface. +type s2aTransportCreds struct { + info *credentials.ProtocolInfo + minTLSVersion commonpb.TLSVersion + maxTLSVersion commonpb.TLSVersion + // tlsCiphersuites contains the ciphersuites used in the S2A connection. 
+ // Note that these are currently unconfigurable. + tlsCiphersuites []commonpb.Ciphersuite + // localIdentity should only be used by the client. + localIdentity *commonpb.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpb.Identity + // targetIdentities should only be used by the client. + targetIdentities []*commonpb.Identity + isClient bool + s2aAddr string + ensureProcessSessionTickets *sync.WaitGroup +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2A to establish a secure connection with a server. +func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil client options") + } + var targetIdentities []*commonpb.Identity + for _, targetIdentity := range opts.TargetIdentities { + protoTargetIdentity, err := toProtoIdentity(targetIdentity) + if err != nil { + return nil, err + } + targetIdentities = append(targetIdentities, protoTargetIdentity) + } + localIdentity, err := toProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentity: localIdentity, + targetIdentities: targetIdentities, + isClient: true, + s2aAddr: opts.S2AAddress, + ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + var fallbackFunc fallback.ClientHandshake + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { + fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2A to establish a secure connection with a client. +func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil server options") + } + var localIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + localIdentities = append(localIdentities, protoLocalIdentity) + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentities: localIdentities, + isClient: false, + s2aAddr: opts.S2AAddress, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) +} + +// ClientHandshake initiates a client-side TLS handshake using the S2A. 
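// A hedged wiring sketch for the constructors above (addresses illustrative):
//
//	creds, _ := s2a.NewClientCreds(s2a.DefaultClientOptions("unix:///tmp/s2a.sock"))
//	conn, _ := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))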
+func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	if !c.isClient {
+		return nil, nil, errors.New("client handshake called using server transport credentials")
+	}
+
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer cancel()
+
+	// Connect to the S2A.
+	hsConn, err := service.Dial(ctx, c.s2aAddr, nil)
+	if err != nil {
+		grpclog.Infof("Failed to connect to S2A: %v", err)
+		return nil, nil, err
+	}
+
+	opts := &handshaker.ClientHandshakerOptions{
+		MinTLSVersion:               c.minTLSVersion,
+		MaxTLSVersion:               c.maxTLSVersion,
+		TLSCiphersuites:             c.tlsCiphersuites,
+		TargetIdentities:            c.targetIdentities,
+		LocalIdentity:               c.localIdentity,
+		TargetName:                  serverAuthority,
+		EnsureProcessSessionTickets: c.ensureProcessSessionTickets,
+	}
+	chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
+	if err != nil {
+		grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err)
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			if closeErr := chs.Close(); closeErr != nil {
+				grpclog.Infof("Close failed unexpectedly: %v", closeErr)
+				err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
+			}
+		}
+	}()
+
+	secConn, authInfo, err := chs.ClientHandshake(context.Background())
+	if err != nil {
+		grpclog.Infof("Handshake failed: %v", err)
+		return nil, nil, err
+	}
+	return secConn, authInfo, nil
+}
+
+// ServerHandshake initiates a server-side TLS handshake using the S2A.
+func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	if c.isClient {
+		return nil, nil, errors.New("server handshake called using client transport credentials")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+
+	// Connect to the S2A.
+	hsConn, err := service.Dial(ctx, c.s2aAddr, nil)
+	if err != nil {
+		grpclog.Infof("Failed to connect to S2A: %v", err)
+		return nil, nil, err
+	}
+
+	opts := &handshaker.ServerHandshakerOptions{
+		MinTLSVersion:   c.minTLSVersion,
+		MaxTLSVersion:   c.maxTLSVersion,
+		TLSCiphersuites: c.tlsCiphersuites,
+		LocalIdentities: c.localIdentities,
+	}
+	shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
+	if err != nil {
+		grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err)
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			if closeErr := shs.Close(); closeErr != nil {
+				grpclog.Infof("Close failed unexpectedly: %v", closeErr)
+				err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
+			}
+		}
+	}()
+
+	secConn, authInfo, err := shs.ServerHandshake(context.Background())
+	if err != nil {
+		grpclog.Infof("Handshake failed: %v", err)
+		return nil, nil, err
+	}
+	return secConn, authInfo, nil
+}
+
+func (c *s2aTransportCreds) Info() credentials.ProtocolInfo {
+	return *c.info
+}
+
+func (c *s2aTransportCreds) Clone() credentials.TransportCredentials {
+	info := *c.info
+	var localIdentity *commonpb.Identity
+	if c.localIdentity != nil {
+		localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity)
+	}
+	var localIdentities []*commonpb.Identity
+	if c.localIdentities != nil {
+		localIdentities = make([]*commonpb.Identity, len(c.localIdentities))
+		for i, localIdentity := range c.localIdentities {
+			localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity)
+		}
+	}
+	var targetIdentities []*commonpb.Identity
+	if c.targetIdentities != nil {
+		targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities))
+		for i, targetIdentity := range c.targetIdentities {
+			targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity)
+		}
+	}
+	return &s2aTransportCreds{
+		info:             &info,
+		minTLSVersion:    c.minTLSVersion,
+		maxTLSVersion:    c.maxTLSVersion,
+		tlsCiphersuites:  c.tlsCiphersuites,
+		localIdentity:    localIdentity,
+		localIdentities:  localIdentities,
+		targetIdentities: targetIdentities,
+		isClient:         c.isClient,
+		s2aAddr:          c.s2aAddr,
+	}
+}
+
+func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error {
+	c.info.ServerName = serverNameOverride
+	return nil
+}
+
+// TLSClientConfigOptions specifies parameters for creating client TLS config.
+type TLSClientConfigOptions struct {
+	// ServerName is required by s2a as the expected name when verifying the hostname found in the server's certificate.
+	//	tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{
+	//		ServerName: "example.com",
+	//	})
+	ServerName string
+}
+
+// TLSClientConfigFactory defines the interface for a client TLS config factory.
+type TLSClientConfigFactory interface {
+	Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error)
+}
+
+// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory.
+func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) {
+	if opts == nil {
+		return nil, fmt.Errorf("opts must be non-nil")
+	}
+	if opts.EnableLegacyMode {
+		return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2")
+	}
+	tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
+	if err != nil {
+		// The only possible error is: access token not set in the environment,
+		// which is okay in environments other than serverless.
+ grpclog.Infof("Access token manager not initialized: %v", err) + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, + tokenManager: nil, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil + } + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, + tokenManager: tokenManager, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil +} + +type s2aTLSClientConfigFactory struct { + s2av2Address string + transportCreds credentials.TransportCredentials + tokenManager tokenmanager.AccessTokenManager + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + serverAuthorizationPolicy []byte +} + +func (f *s2aTLSClientConfigFactory) Build( + ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { + serverName := "" + if opts != nil && opts.ServerName != "" { + serverName = opts.ServerName + } + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) +} + +func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { + switch verificationMode { + case ConnectToGoogle: + return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE + case Spiffe: + return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + default: + return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED + } +} + +// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. +// Example use with http.RoundTripper: +// +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// }) +// transport := http.DefaultTransport +// transport.DialTLSContext = dialTLSContext +func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { + + return func(ctx context.Context, network, addr string) (net.Conn, error) { + + fallback := func(err error) (net.Conn, error) { + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && + opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { + fbDialer := opts.FallbackOpts.FallbackDialer + grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) + fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) + if fbErr != nil { + return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) + } + return fbConn, nil + } + return nil, err + } + + factory, err := NewTLSClientConfigFactory(opts) + if err != nil { + grpclog.Infof("error creating S2A client config factory: %v", err) + return fallback(err) + } + + serverName, _, err := net.SplitHostPort(addr) + if err != nil { + serverName = addr + } + timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) + defer cancel() + + var s2aTLSConfig *tls.Config + retry.Run(timeoutCtx, + func() error { + s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + return err + }) + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return fallback(err) + } + + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } + var c net.Conn + retry.Run(timeoutCtx, + func() error { + c, err 
= s2aDialer.DialContext(timeoutCtx, network, addr) + return err + }) + if err != nil { + grpclog.Infof("error dialing with S2A to %s: %v", addr, err) + return fallback(err) + } + grpclog.Infof("success dialing MTLS to %s with S2A", addr) + return c, nil + } +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/s2a_options.go b/event-exporter/vendor/github.com/google/s2a-go/s2a_options.go new file mode 100644 index 000000000..fcdbc1621 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/s2a_options.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "sync" + + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/credentials" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +// Identity is the interface for S2A identities. +type Identity interface { + // Name returns the name of the identity. + Name() string +} + +type spiffeID struct { + spiffeID string +} + +func (s *spiffeID) Name() string { return s.spiffeID } + +// NewSpiffeID creates a SPIFFE ID from id. +func NewSpiffeID(id string) Identity { + return &spiffeID{spiffeID: id} +} + +type hostname struct { + hostname string +} + +func (h *hostname) Name() string { return h.hostname } + +// NewHostname creates a hostname from name. +func NewHostname(name string) Identity { + return &hostname{hostname: name} +} + +type uid struct { + uid string +} + +func (h *uid) Name() string { return h.uid } + +// NewUID creates a UID from name. +func NewUID(name string) Identity { + return &uid{uid: name} +} + +// VerificationModeType specifies the mode that S2A must use to verify the peer +// certificate chain. +type VerificationModeType int + +// Three types of verification modes. +const ( + Unspecified = iota + ConnectToGoogle + Spiffe +) + +// ClientOptions contains the client-side options used to establish a secure +// channel using the S2A handshaker service. +type ClientOptions struct { + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity, if one exists. + LocalIdentity Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials + // EnsureProcessSessionTickets waits for all session tickets to be sent to + // S2A before a process completes. + // + // This functionality is crucial for processes that complete very soon after + // using S2A to establish a TLS connection, but it can be ignored for longer + // lived processes. 
+ // + // Usage example: + // func main() { + // var ensureProcessSessionTickets sync.WaitGroup + // clientOpts := &s2a.ClientOptions{ + // EnsureProcessSessionTickets: &ensureProcessSessionTickets, + // // Set other members. + // } + // creds, _ := s2a.NewClientCreds(clientOpts) + // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) + // defer conn.Close() + // + // // Make RPC call. + // + // // The process terminates right after the RPC call ends. + // // ensureProcessSessionTickets can be used to ensure resumption + // // tickets are fully processed. If the process is long-lived, using + // // ensureProcessSessionTickets is not necessary. + // ensureProcessSessionTickets.Wait() + // } + EnsureProcessSessionTickets *sync.WaitGroup + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Optional fallback after dialing with S2A fails. + FallbackOpts *FallbackOptions + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + + // Serialized user specified policy for server authorization. + serverAuthorizationPolicy []byte +} + +// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. +type FallbackOptions struct { + // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). + // It will be called by ClientHandshake function, after handshake with S2A fails. + // s2a.NewClientCreds() ignores the other FallbackDialer field. + FallbackClientHandshakeFunc fallback.ClientHandshake + + // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). + // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. + // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. + FallbackDialer *FallbackDialer +} + +// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. +type FallbackDialer struct { + // Dialer specifies a fallback tls.Dialer. + Dialer *tls.Dialer + // ServerAddr is used by Dialer to establish fallback connection. + ServerAddr string +} + +// DefaultClientOptions returns the default client options. +func DefaultClientOptions(s2aAddress string) *ClientOptions { + return &ClientOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +// ServerOptions contains the server-side options used to establish a secure +// channel using the S2A handshaker service. +type ServerOptions struct { + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity, if one exists. + LocalIdentities []Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Generates an S2AStream interface for talking to the S2A server. 
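	// (Mirrors the identically typed getS2AStream field on ClientOptions
	// above; presumably left nil in normal use so the default stream to
	// S2AAddress is used.)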
+ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +} + +// DefaultServerOptions returns the default server options. +func DefaultServerOptions(s2aAddress string) *ServerOptions { + return &ServerOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/s2a_utils.go b/event-exporter/vendor/github.com/google/s2a-go/s2a_utils.go new file mode 100644 index 000000000..d649cc461 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/s2a_utils.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" +) + +// AuthInfo exposes security information from the S2A to the application. +type AuthInfo interface { + // AuthType returns the authentication type. + AuthType() string + // ApplicationProtocol returns the application protocol, e.g. "grpc". + ApplicationProtocol() string + // TLSVersion returns the TLS version negotiated during the handshake. + TLSVersion() commonpb.TLSVersion + // Ciphersuite returns the ciphersuite negotiated during the handshake. + Ciphersuite() commonpb.Ciphersuite + // PeerIdentity returns the authenticated identity of the peer. + PeerIdentity() *commonpb.Identity + // LocalIdentity returns the local identity of the application used during + // session setup. + LocalIdentity() *commonpb.Identity + // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in + // the S2A handshake. + PeerCertFingerprint() []byte + // LocalCertFingerprint returns the SHA256 hash of the local certificate used + // in the S2A handshake. + LocalCertFingerprint() []byte + // IsHandshakeResumed returns true if a cached session was used to resume + // the handshake. + IsHandshakeResumed() bool + // SecurityLevel returns the security level of the connection. + SecurityLevel() credentials.SecurityLevel +} + +// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given +// peer, if it exists. This API should be used by gRPC clients after +// obtaining a peer object using the grpc.Peer() CallOption. 
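// A hedged client-side sketch using AuthInfoFromPeer, defined below (RPC and
// message names illustrative):
//
//	var p peer.Peer
//	resp, err := client.SomeCall(ctx, req, grpc.Peer(&p))
//	if err == nil {
//		if info, infoErr := s2a.AuthInfoFromPeer(&p); infoErr == nil {
//			_ = info.TLSVersion()
//		}
//	}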
+func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) {
+	s2aAuthInfo, ok := p.AuthInfo.(AuthInfo)
+	if !ok {
+		return nil, errors.New("no S2AAuthInfo found in Peer")
+	}
+	return s2aAuthInfo, nil
+}
+
+// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given
+// context, if it exists. This API should be used by gRPC server RPC handlers
+// to get information about the peer. On the client-side, use the grpc.Peer()
+// CallOption and the AuthInfoFromPeer function.
+func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) {
+	p, ok := peer.FromContext(ctx)
+	if !ok {
+		return nil, errors.New("no Peer found in Context")
+	}
+	return AuthInfoFromPeer(p)
+}
diff --git a/event-exporter/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/event-exporter/vendor/github.com/google/s2a-go/stream/s2a_stream.go
new file mode 100644
index 000000000..584bf32b1
--- /dev/null
+++ b/event-exporter/vendor/github.com/google/s2a-go/stream/s2a_stream.go
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stream provides an interface for bidirectional streaming to the S2A server.
+package stream
+
+import (
+	s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
+)
+
+// S2AStream defines the operations for communicating with the S2A server over a bidirectional stream.
+type S2AStream interface {
+	// Send sends the message to the S2A server.
+	Send(*s2av2pb.SessionReq) error
+	// Recv receives the message from the S2A server.
+	Recv() (*s2av2pb.SessionResp, error)
+	// CloseSend closes the channel to the S2A server.
+ CloseSend() error +} diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/client_cert.pem new file mode 100644 index 000000000..493a5a264 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/client_key.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/client_key.pem new file mode 100644 index 000000000..55a7f10c7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm 
+HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem new file mode 100644 index 000000000..60c4cf069 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD +VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy +MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl +c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn +3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO +7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb +A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T +cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO +VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL +dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI +je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ +l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 +YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM +E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK +BTq2PBvc59T6OFLq +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem new file mode 100644 index 000000000..9d112d1e9 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH +65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM +FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 +rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 +M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf +2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 +ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA +NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 +LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT +EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW +/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ +XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI +wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU +lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC +k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK +UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB +8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o +4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB +Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs +FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv +r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap +CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 
+w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr +63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 +Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r +fVMrcL3jSf/W1Xh4TgtyoU8= +-----END PRIVATE KEY----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem new file mode 100644 index 000000000..44e436f6e --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 +MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE +CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW +QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 +r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg +Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n +rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe +d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR +MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ +yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN +XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x +fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN +9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa +VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G +MTV7jmY9TBPtfhRuO/cG650+F+cw +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem new file mode 100644 index 000000000..68c606134 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 +MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE +AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh +HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn +H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK +GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA +Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe +LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA +AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G +A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB +AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT +EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn +JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe +2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs +HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI +4Wcvfz/isxgmH1UqIt3oc6ad +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem 
b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem new file mode 100644 index 000000000..b14ad0f72 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 +GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza +ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 +KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 +CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 +CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd +Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz +yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ +Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V +4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY +QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy +0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp +2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms +GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz +wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ +SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 +cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f +R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn +htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi +AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw +O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh +cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 +EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw +SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x +gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe +1ChfPP1AH+/75MJCvu6wQBQv +-----END PRIVATE KEY----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem new file mode 100644 index 000000000..ad1bad598 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE +AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 +MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE +CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe +2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW +HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB +cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 +5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z +ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA +ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG +lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ +XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt +Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 +ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU +gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem 
b/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem new file mode 100644 index 000000000..bcf08e4f1 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS +xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR +4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS +n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp +B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 +vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN +DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S +pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj +ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF +p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn +r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B +7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ +Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 +fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz +1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk +emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP +ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw +Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF +vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD +B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh +eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi +elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 +Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo +mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk +k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ +8x3gNkxJRb4NaLIoNzAhCoo= +-----END PRIVATE KEY----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/server_cert.pem new file mode 100644 index 000000000..0f98322c7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl 
+AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/event-exporter/vendor/github.com/google/s2a-go/testdata/server_key.pem b/event-exporter/vendor/github.com/google/s2a-go/testdata/server_key.pem new file mode 100644 index 000000000..81afea783 --- /dev/null +++ b/event-exporter/vendor/github.com/google/s2a-go/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/event-exporter/vendor/github.com/google/uuid/CHANGELOG.md b/event-exporter/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 000000000..7ec5ac7ea --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) 
(2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/event-exporter/vendor/github.com/google/uuid/CONTRIBUTING.md b/event-exporter/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..a502fdc51 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/event-exporter/vendor/github.com/google/uuid/CONTRIBUTORS b/event-exporter/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/event-exporter/vendor/github.com/google/uuid/LICENSE b/event-exporter/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/event-exporter/vendor/github.com/google/uuid/README.md b/event-exporter/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..3e9a61889 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +```sh +go get github.com/google/uuid +``` + +###### Documentation +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/event-exporter/vendor/github.com/google/uuid/dce.go b/event-exporter/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. 
+// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/event-exporter/vendor/github.com/google/uuid/doc.go b/event-exporter/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/event-exporter/vendor/github.com/google/uuid/hash.go b/event-exporter/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..dc60082d3 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/event-exporter/vendor/github.com/google/uuid/marshal.go b/event-exporter/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/node.go b/event-exporter/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. 
If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/event-exporter/vendor/github.com/google/uuid/node_js.go b/event-exporter/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..b2a0bc871 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/event-exporter/vendor/github.com/google/uuid/node_net.go b/event-exporter/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/null.go b/event-exporter/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. 
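+// +// Scan delegates to UUID.Scan for non-nil values and treats a nil driver +// value as SQL NULL. A minimal sketch (illustrative): +// +// var nu NullUUID +// _ = nu.Scan(nil) // nu.Valid == false +// _ = nu.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8") // nu.Valid == true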
+func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/event-exporter/vendor/github.com/google/uuid/sql.go b/event-exporter/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
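+// +// Because UUID implements driver.Valuer, a UUID can be passed directly as a +// query parameter; a minimal sketch (illustrative, table name assumed): +// +// _, err := db.Exec(`INSERT INTO foo (id) VALUES (?)`, uuid.New())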
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/time.go b/event-exporter/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..c35112927 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/time.go @@ -0,0 +1,134 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. 
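+// +// A minimal sketch (illustrative) of recovering a wall-clock time from a +// Version 1 UUID via UnixTime: +// +// sec, nsec := u.Time().UnixTime() +// created := time.Unix(sec, nsec)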
+func (uuid UUID) Time() Time { + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/event-exporter/vendor/github.com/google/uuid/util.go b/event-exporter/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/event-exporter/vendor/github.com/google/uuid/uuid.go b/event-exporter/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..5232b4867 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
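+// +// As with Parse, all four textual forms are accepted; for example +// (illustrative), these all decode to the same UUID: +// +// ParseBytes([]byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) +// ParseBytes([]byte("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")) +// ParseBytes([]byte("{6ba7b810-9dad-11d1-80b4-00c04fd430c8}")) +// ParseBytes([]byte("6ba7b8109dad11d180b400c04fd430c8"))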
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
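+// +// For example (illustrative): +// +// MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8").URN() +// // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"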
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/event-exporter/vendor/github.com/google/uuid/version1.go b/event-exporter/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set, NewUUID returns nil. If the clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/version4.go b/event-exporter/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, which +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
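+// +// This is useful for reproducible UUIDs in tests; for example (illustrative), +// a seeded *math/rand.Rand implements io.Reader: +// +// r := rand.New(rand.NewSource(1)) +// u, err := uuid.NewRandomFromReader(r)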
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/version6.go b/event-exporter/vendor/github.com/google/uuid/version6.go new file mode 100644 index 000000000..339a959a7 --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/event-exporter/vendor/github.com/google/uuid/version7.go b/event-exporter/vendor/github.com/google/uuid/version7.go new file mode 100644 index 000000000..3167b643d --- /dev/null +++ b/event-exporter/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded, +// as well as improved entropy characteristics over versions 1 and 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID versions 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error. +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch). +// It uses NewRandomFromReader to fill the random bits. +// On error, NewV7FromReader returns Nil and an error. +func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fills in 48 bits of time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]); +// uuid[8] already has the right variant bits set (Variant is 10). +// See NewV7 and NewV7FromReader. +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned, stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guaranteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time.
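+// +// The packed value is milli<<12 | seq: 52 bits of milliseconds above 12 bits +// of sub-millisecond sequence. As a worked example (illustrative), milli = 1 +// and seq = 5 pack to 1<<12 + 5 = 4101, so any later call must return a +// packed value of at least 4102.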
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go new file mode 100644 index 000000000..ea5beb5aa --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -0,0 +1,219 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). +// +// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. +package client + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "errors" + "fmt" + "io" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" +const encryptAPI = "EnterpriseCertSigner.Encrypt" +const decryptAPI = "EnterpriseCertSigner.Decrypt" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(crypto.SHA384) + gob.Register(crypto.SHA512) + gob.Register(&rsa.PSSOptions{}) + gob.Register(&rsa.OAEPOptions{}) +} + +// SignArgs contains arguments for a Sign API call. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing. 
Must implement HashFunc().
+}
+
+// EncryptArgs contains arguments for an Encrypt API call.
+type EncryptArgs struct {
+	Plaintext []byte // The plaintext to encrypt.
+	Opts      any    // Options for encryption. Ex: an instance of crypto.Hash.
+}
+
+// DecryptArgs contains arguments for a Decrypt API call.
+type DecryptArgs struct {
+	Ciphertext []byte               // The ciphertext to decrypt.
+	Opts       crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions.
+}
+
+// Key implements credential.Credential by holding the executed signer subprocess.
+type Key struct {
+	cmd       *exec.Cmd        // Pointer to the signer subprocess.
+	client    *rpc.Client      // Pointer to the rpc client that communicates with the signer subprocess.
+	publicKey crypto.PublicKey // Public key of loaded certificate.
+	chain     [][]byte         // Certificate chain of loaded certificate.
+}
+
+// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key.
+func (k *Key) CertificateChain() [][]byte {
+	return k.chain
+}
+
+// Close closes the RPC connection and kills the signer subprocess.
+// Call this to free up resources when the Key object is no longer needed.
+func (k *Key) Close() error {
+	if err := k.cmd.Process.Kill(); err != nil {
+		return fmt.Errorf("failed to kill signer process: %w", err)
+	}
+	// Wait for cmd to exit and release resources. Since the process is forcefully killed, this
+	// will return a non-nil error (varies by OS), which we will ignore.
+	_ = k.cmd.Wait()
+	// The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
+	// Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
+	if err := k.client.Close(); err.Error() != "close |0: file already closed" {
+		return fmt.Errorf("failed to close RPC connection: %w", err)
+	}
+	return nil
+}
+
+// Public returns the public key for this Key.
+func (k *Key) Public() crypto.PublicKey {
+	return k.publicKey
+}
+
+// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface.
+func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
+	if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() {
+		return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size())
+	}
+	err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed)
+	return
+}
+
+// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts.
+func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) {
+	err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext)
+	return
+}
+
+// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface.
+func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
+	err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext)
+	return
+}
+
+// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable,
+// possibly due to missing config or missing binary path.
+var ErrCredUnavailable = errors.New("Cred is unavailable")
+
+// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate
+// related operations, including signing messages with the private key.
+// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. +func Cred(configFilePath string) (*Key, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + if errors.Is(err, util.ErrConfigUnavailable) { + return nil, ErrCredUnavailable + } + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. + kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + + return k, nil +} diff --git a/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 000000000..f374a7f55 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,100 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package util provides helper functions for the client. +package util + +import ( + "encoding/json" + "errors" + "io" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" +) + +const configFileName = "certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. 
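+//
+// A minimal certificate_config.json matching this schema might look like the
+// following sketch (the binary location is a hypothetical example):
+//
+//	{
+//	  "libs": {
+//	    "ecp": "/usr/local/bin/ecp"
+//	  }
+//	}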
+type EnterpriseCertificateConfig struct {
+	Libs Libs `json:"libs"`
+}
+
+// Libs specifies the locations of helper libraries.
+type Libs struct {
+	ECP string `json:"ecp"`
+}
+
+// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable,
+// possibly because the entire config is missing or the binary path is missing.
+var ErrConfigUnavailable = errors.New("Config is unavailable")
+
+// LoadSignerBinaryPath retrieves the path of the signer binary from the config file.
+func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
+	jsonFile, err := os.Open(configFilePath)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return "", ErrConfigUnavailable
+		}
+		return "", err
+	}
+
+	byteValue, err := io.ReadAll(jsonFile)
+	if err != nil {
+		return "", err
+	}
+	var config EnterpriseCertificateConfig
+	err = json.Unmarshal(byteValue, &config)
+	if err != nil {
+		return "", err
+	}
+	signerBinaryPath := config.Libs.ECP
+	if signerBinaryPath == "" {
+		return "", ErrConfigUnavailable
+	}
+
+	signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir())
+	signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir())
+	return signerBinaryPath, nil
+}
+
+func guessHomeDir() string {
+	// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	// Else, fall back to user.Current:
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
+
+func getDefaultConfigFileDirectory() (directory string) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud")
+	}
+	return filepath.Join(guessHomeDir(), ".config/gcloud")
+}
+
+// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gcloud.
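+//
+// Given getDefaultConfigFileDirectory above, this resolves to
+// $HOME/.config/gcloud/certificate_config.json on Linux and macOS, and to
+// %APPDATA%\gcloud\certificate_config.json on Windows.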
+func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} + +// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG +func GetConfigFilePathFromEnv() (path string) { + return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG") +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/event-exporter/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 000000000..34107104e --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.12.1" +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/event-exporter/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 000000000..e16ab6033 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,114 @@ +# Changelog + +## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) + + +### Bug Fixes + +* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) + +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients 
([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 869379da9..d785a065c 100644 --- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -28,18 +28,25 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE.
 
 // Package apierror implements a wrapper error for parsing error details from
-// API calls. Currently, only errors representing a gRPC status are supported.
+// API calls. Both HTTP & gRPC status errors are supported.
+//
+// For examples of how to use [APIError] with client libraries please reference
+// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors)
+// in the client library documentation.
 package apierror
 
 import (
+	"errors"
 	"fmt"
 	"strings"
 
 	jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
 )
 
 // ErrDetails holds the google/rpc/error_details.proto messages.
@@ -59,6 +66,30 @@ type ErrDetails struct {
 	Unknown []interface{}
 }
 
+// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages.
+var ErrMessageNotFound = errors.New("message not found")
+
+// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the
+// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type,
+// the content of the message is copied to the provided message.
+//
+// ExtractProtoMessage will return ErrMessageNotFound if there are no messages matching the
+// protocol buffer type of the provided message.
+func (e ErrDetails) ExtractProtoMessage(v proto.Message) error {
+	if v == nil {
+		return ErrMessageNotFound
+	}
+	for _, elem := range e.Unknown {
+		if elemProto, ok := elem.(proto.Message); ok {
+			if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() {
+				proto.Merge(v, elemProto)
+				return nil
+			}
+		}
+	}
+	return ErrMessageNotFound
+}
+
 func (e ErrDetails) String() string {
 	var d strings.Builder
 	if e.ErrorInfo != nil {
@@ -171,12 +202,12 @@ func (a *APIError) Unwrap() error {
 // Error returns a readable representation of the APIError.
 func (a *APIError) Error() string {
 	var msg string
-	if a.status != nil {
-		msg = a.err.Error()
-	} else if a.httpErr != nil {
+	if a.httpErr != nil {
 		// Truncate the googleapi.Error message because it dumps the Details in
 		// an ugly way.
 		msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
+	} else if a.status != nil {
+		msg = a.err.Error()
 	}
 	return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
 }
@@ -207,29 +238,53 @@ func (a *APIError) Metadata() map[string]string {
 
 }
 
-// FromError parses a Status error or a googleapi.Error and builds an APIError.
-func FromError(err error) (*APIError, bool) {
-	if err == nil {
-		return nil, false
-	}
-
-	ae := APIError{err: err}
+// setDetailsFromError parses a Status error or a googleapi.Error
+// and sets status and details or httpErr and details, respectively.
+// It returns false if neither Status nor googleapi.Error can be parsed.
+// When err is a googleapi.Error, the status of the returned error will
+// be set to an Unknown error, rather than nil, since a nil code is
+// interpreted as OK in the gRPC status package.
+func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) - herr, isHTTPErr := err.(*googleapi.Error) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some @@ -294,3 +349,13 @@ func parseHTTPDetails(gae *googleapi.Error) ErrDetails { return parseDetails(details) } + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code]. +func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 000000000..e4b03f161 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. 
+type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. +func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. 
+func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: 
error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 000000000..21678ae65 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. 
+ string error_message = 3; +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go index 27b34c06e..7dd9b8373 100644 --- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -14,9 +14,9 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.0 // protoc v3.15.8 -// source: error.proto +// source: apierror/internal/proto/error.proto package jsonerror @@ -55,7 +55,7 @@ type Error struct { func (x *Error) Reset() { *x = Error{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -68,7 +68,7 @@ func (x *Error) String() string { func (*Error) ProtoMessage() {} func (x *Error) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -81,7 +81,7 @@ func (x *Error) ProtoReflect() protoreflect.Message { // Deprecated: Use Error.ProtoReflect.Descriptor instead. func (*Error) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} } func (x *Error) GetError() *Error_Status { @@ -112,7 +112,7 @@ type Error_Status struct { func (x *Error_Status) Reset() { *x = Error_Status{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +125,7 @@ func (x *Error_Status) String() string { func (*Error_Status) ProtoMessage() {} func (x *Error_Status) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +138,7 @@ func (x *Error_Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
func (*Error_Status) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0, 0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} } func (x *Error_Status) GetCode() int32 { @@ -169,53 +169,55 @@ func (x *Error_Status) GetDetails() []*anypb.Any { return nil } -var File_error_proto protoreflect.FileDescriptor +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor -var file_error_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, - 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, - 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 
0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - file_error_proto_rawDescOnce sync.Once - file_error_proto_rawDescData = file_error_proto_rawDesc + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc ) -func file_error_proto_rawDescGZIP() []byte { - file_error_proto_rawDescOnce.Do(func() { - file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData) +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) }) - return file_error_proto_rawDescData + return file_apierror_internal_proto_error_proto_rawDescData } -var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_error_proto_goTypes = []interface{}{ +var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ (*Error)(nil), // 0: error.Error (*Error_Status)(nil), // 1: error.Error.Status (code.Code)(0), // 2: google.rpc.Code (*anypb.Any)(nil), // 3: google.protobuf.Any } -var file_error_proto_depIdxs = []int32{ +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ 1, // 0: error.Error.error:type_name -> error.Error.Status 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any @@ -226,13 +228,13 @@ var file_error_proto_depIdxs = []int32{ 0, // [0:3] is the sub-list for field type_name } -func init() { file_error_proto_init() } -func file_error_proto_init() { - if File_error_proto != nil { +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*Error); i { case 0: return &v.state @@ -244,7 +246,7 @@ func file_error_proto_init() { return nil } } - file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Error_Status); i { case 0: return &v.state @@ -261,18 +263,18 @@ func file_error_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_error_proto_rawDesc, + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_error_proto_goTypes, - DependencyIndexes: file_error_proto_depIdxs, - MessageInfos: file_error_proto_msgTypes, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, }.Build() - File_error_proto = out.File - file_error_proto_rawDesc = nil - file_error_proto_goTypes = nil - file_error_proto_depIdxs = nil + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil } diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/call_option.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/call_option.go index 425a7668d..c52e03f64 100644 --- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -30,9 +30,11 @@ package gax import ( + "errors" "math/rand" "time" + "google.golang.org/api/googleapi" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -119,6 +121,41 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) { return 0, false } +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + // Backoff implements exponential backoff. The wait time between retries is a // random value between 0 and the "retry period" - the time between retries. The // retry period starts at Initial and increases by the factor of Multiplier @@ -173,11 +210,43 @@ func (o grpcOpt) Resolve(s *CallSettings) { s.GRPC = o } +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. 
+func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + // WithGRPCOptions allows passing gRPC call options during client creation. func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) } +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. +func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + // CallSettings allow fine-grained control over how calls are made. type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. @@ -186,4 +255,11 @@ type CallSettings struct { // CallOptions to be forwarded to GRPC. GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration } diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 000000000..9aab3d91f --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,82 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] + // that specifies the response read mask. 
The value(s) for this header + // must adhere to format described in [fieldmaskpb]. + // + // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters + // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb + XGoogFieldMaskHeader = "x-goog-fieldmask" + + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/content_type.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 000000000..1b53d0a3a --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
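The callctx package added above is the public seam for attaching per-call headers. A quick sketch of the round trip; the x-goog-request-params value is illustrative, while XGoogFieldMaskHeader is the constant defined above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()

	// Values accumulate per key rather than overwrite.
	ctx = callctx.SetHeaders(ctx, callctx.XGoogFieldMaskHeader, "name,displayName")
	ctx = callctx.SetHeaders(ctx, "x-goog-request-params", "parent=projects/p") // illustrative pair

	// Client libraries pull these back out when building outgoing requests.
	for k, vals := range callctx.HeadersFromContext(ctx) {
		fmt.Println(k, vals)
	}
}
```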
+
+package gax
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+const sniffBuffSize = 512
+
+func newContentSniffer(r io.Reader) *contentSniffer {
+	return &contentSniffer{r: r}
+}
+
+// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
+type contentSniffer struct {
+	r     io.Reader
+	start []byte // buffer for the sniffed bytes.
+	err   error  // set to any error encountered while reading bytes to be sniffed.
+
+	ctype   string // set on first sniff.
+	sniffed bool   // set to true on first sniff.
+}
+
+func (cs *contentSniffer) Read(p []byte) (n int, err error) {
+	// Ensure that the content type is sniffed before any data is consumed from Reader.
+	_, _ = cs.ContentType()
+
+	if len(cs.start) > 0 {
+		n := copy(p, cs.start)
+		cs.start = cs.start[n:]
+		return n, nil
+	}
+
+	// We may have read some bytes into start while sniffing, even if the read ended in an error.
+	// We should first return those bytes, then the error.
+	if cs.err != nil {
+		return 0, cs.err
+	}
+
+	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
+	return cs.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (cs *contentSniffer) ContentType() (string, bool) {
+	if cs.sniffed {
+		return cs.ctype, cs.ctype != ""
+	}
+	cs.sniffed = true
+	// If ReadAll hits EOF, it returns err==nil.
+	cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
+
+	// Don't try to detect the content type based on possibly incomplete data.
+	if cs.err != nil {
+		return "", false
+	}
+
+	cs.ctype = http.DetectContentType(cs.start)
+	return cs.ctype, true
+}
+
+// DetermineContentType determines the content type of the supplied reader.
+// The content of media will be sniffed to determine the content type.
+// After calling DetermineContentType the caller must not perform further reads on
+// media, but rather read from the Reader that is returned.
+func DetermineContentType(media io.Reader) (io.Reader, string) {
+	// For backwards compatibility, allow clients to set content
+	// type by providing a ContentTyper for media.
+	// Note: This is an anonymous interface definition copied from googleapi.ContentTyper.
+	if typer, ok := media.(interface {
+		ContentType() string
+	}); ok {
+		return media, typer.ContentType()
+	}
+
+	sniffer := newContentSniffer(media)
+	if ctype, ok := sniffer.ContentType(); ok {
+		return sniffer, ctype
+	}
+	// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
+	return sniffer, ""
+}
diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/gax.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/gax.go
index f634b4372..36cdfa33e 100644
--- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/gax.go
+++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -35,5 +35,7 @@
 // to simplify code generation and to provide more convenient and idiomatic API surfaces.
 package gax
 
+import "github.com/googleapis/gax-go/v2/internal"
+
 // Version specifies the gax-go version being used.
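A small usage sketch of the sniffing helpers in content_type.go above. The returned reader must replace the original, because up to 512 bytes may already have been buffered during detection; the HTML snippet is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/googleapis/gax-go/v2"
)

func main() {
	media := strings.NewReader("<html><body>hello</body></html>") // illustrative payload

	r, ctype := gax.DetermineContentType(media)
	fmt.Println("content type:", ctype) // text/html; charset=utf-8

	// Read from r, not media: the sniffed prefix is replayed first.
	b, _ := io.ReadAll(r)
	fmt.Println("length:", len(b))
}
```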
-const Version = "2.1.1" +const Version = internal.Version diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/header.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/header.go index 139371a0b..3e53729e5 100644 --- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/header.go @@ -29,9 +29,83 @@ package gax -import "bytes" +import ( + "bytes" + "context" + "fmt" + "net/http" + "runtime" + "strings" + "unicode" -// XGoogHeader is for use by the Google Cloud Libraries only. + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} + +// XGoogHeader is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. // // XGoogHeader formats key-value pairs. // The resulting string is suitable for x-goog-api-client header. @@ -51,3 +125,49 @@ func XGoogHeader(keyval ...string) string { } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. See package [github.com/googleapis/gax-go/v2/callctx] for help +// setting/retrieving request/response headers. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. 
+func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 000000000..890d4819e --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. 
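Tying the header helpers above together, a sketch of how callctx-stored values and explicit pairs merge in BuildHeaders; all key/value pairs here are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2"
	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	// Illustrative header stored via callctx earlier in the call chain.
	ctx := callctx.SetHeaders(context.Background(),
		"x-goog-request-params", "parent=projects/p")

	// Existing values are appended to, never overwritten. XGoogHeader
	// formats the pairs into the x-goog-api-client value.
	h := gax.BuildHeaders(ctx,
		"x-goog-api-client", gax.XGoogHeader("gl-go", gax.GoVersion, "gax", gax.Version))

	fmt.Println(h)
}
```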
+const Version = "2.12.1" diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/invoke.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/invoke.go index 9fcc29959..721d1af55 100644 --- a/event-exporter/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -68,6 +68,16 @@ type sleeper func(ctx context.Context, d time.Duration) error // invoke implements Invoke, taking an additional sleeper argument for testing. func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + for { err := call(ctx, settings) if err == nil { diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/event-exporter/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 000000000..cc4486eb9 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,126 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. 
+// The stream must appear as a comma-delimited, JSON array of objects with
+// opening and closing square brackets.
+//
+// This is for internal use only.
+type ProtoJSONStream struct {
+	first, closed bool
+	reader        io.ReadCloser
+	stream        *json.Decoder
+	typ           protoreflect.MessageType
+}
+
+// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are
+// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream
+// must be closed when done.
+//
+// This is for internal use only.
+func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream {
+	return &ProtoJSONStream{
+		first:  true,
+		reader: rc,
+		stream: json.NewDecoder(rc),
+		typ:    typ,
+	}
+}
+
+// Recv decodes the next protobuf message in the stream or returns io.EOF if
+// the stream is done. It is not safe to call Recv on the same stream from
+// different goroutines, just like it is not safe to do so with a single gRPC
+// stream. Type-cast the protobuf message returned to the type provided at
+// ProtoJSONStream creation.
+// Calls to Recv after calling Close will produce io.EOF.
+func (s *ProtoJSONStream) Recv() (proto.Message, error) {
+	if s.closed {
+		return nil, io.EOF
+	}
+	if s.first {
+		s.first = false
+
+		// Consume the opening '[' so Decode gets one object at a time.
+		if t, err := s.stream.Token(); err != nil {
+			return nil, err
+		} else if t != arrayOpen {
+			return nil, errBadOpening
+		}
+	}
+
+	// Capture the next block of data for the item (a JSON object) in the stream.
+	var raw json.RawMessage
+	if err := s.stream.Decode(&raw); err != nil {
+		e := err
+		// To avoid checking the first token of each stream, just attempt to
+		// Decode the next blob and if that fails, double check if it is just
+		// the closing token ']'. If it is the closing, return io.EOF. If it
+		// isn't, return the original error.
+		if t, _ := s.stream.Token(); t == arrayClose {
+			e = io.EOF
+		}
+		return nil, e
+	}
+
+	// Initialize a new instance of the protobuf message to unmarshal the
+	// raw data into.
+	m := s.typ.New().Interface()
+	err := protojson.Unmarshal(raw, m)
+
+	return m, err
+}
+
+// Close closes the stream so that resources are cleaned up.
+func (s *ProtoJSONStream) Close() error {
+	// Dereference the *json.Decoder so that the memory is gc'd.
+ s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/event-exporter/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/event-exporter/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 000000000..61ee266a1 --- /dev/null +++ b/event-exporter/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/event-exporter/vendor/go.opencensus.io/Makefile b/event-exporter/vendor/go.opencensus.io/Makefile index b3ce3df30..d896edc99 100644 --- a/event-exporter/vendor/go.opencensus.io/Makefile +++ b/event-exporter/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/event-exporter/vendor/go.opencensus.io/opencensus.go b/event-exporter/vendor/go.opencensus.io/opencensus.go index e5e4b4368..11e31f421 100644 --- a/event-exporter/vendor/go.opencensus.io/opencensus.go +++ b/event-exporter/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/event-exporter/vendor/go.opencensus.io/plugin/ochttp/server.go b/event-exporter/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea64235..f7c8434be 100644 --- a/event-exporter/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/event-exporter/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/event-exporter/vendor/go.opencensus.io/stats/doc.go b/event-exporter/vendor/go.opencensus.io/stats/doc.go index 00d473ee0..31477a464 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/doc.go +++ b/event-exporter/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. 
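Returning to the ProtoJSONStream reader added above, a self-contained sketch; structpb.Struct stands in here for a generated response message type, and the payload is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/googleapis/gax-go/v2"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// A protobuf-JSON stream is a JSON array of messages of one type.
	body := io.NopCloser(strings.NewReader(`[{"a": 1}, {"a": 2}]`)) // illustrative stream

	s := gax.NewProtoJSONStreamReader(body, (&structpb.Struct{}).ProtoReflect().Type())
	defer s.Close()

	for {
		msg, err := s.Recv()
		if err == io.EOF {
			break // the closing ']' was consumed
		}
		if err != nil {
			fmt.Println("recv:", err)
			return
		}
		fmt.Println(msg.(*structpb.Struct).AsMap())
	}
}
```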
OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/event-exporter/vendor/go.opencensus.io/stats/internal/record.go b/event-exporter/vendor/go.opencensus.io/stats/internal/record.go index 36935e629..436dc791f 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/internal/record.go +++ b/event-exporter/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/event-exporter/vendor/go.opencensus.io/stats/record.go b/event-exporter/vendor/go.opencensus.io/stats/record.go index 2b9728346..8b5b99803 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/record.go +++ b/event-exporter/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. 
func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/event-exporter/vendor/go.opencensus.io/stats/view/aggregation.go b/event-exporter/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568c..61f72d20d 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/event-exporter/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/event-exporter/vendor/go.opencensus.io/stats/view/collector.go b/event-exporter/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2..bcd6e08c7 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/view/collector.go +++ b/event-exporter/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/event-exporter/vendor/go.opencensus.io/stats/view/doc.go b/event-exporter/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1f..60bf0e392 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/view/doc.go +++ b/event-exporter/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
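The rewritten stats.Record above short-circuits unless some registered view subscribes to the measure, so tag encoding and the worker channel send are skipped entirely on the unsubscribed path. A minimal sketch; the measure and view names are illustrative:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative measure name and description.
var latencyMs = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)

func main() {
	// Without this registration, stats.Record returns almost immediately:
	// no view subscribes to the measure, so nothing reaches the worker.
	if err := view.Register(&view.View{
		Name:        "example.com/views/latency_distribution", // illustrative
		Measure:     latencyMs,
		Aggregation: view.Distribution(0, 50, 100, 500),
	}); err != nil {
		log.Fatal(err)
	}

	stats.Record(context.Background(), latencyMs.M(42))
}
```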
diff --git a/event-exporter/vendor/go.opencensus.io/stats/view/worker.go b/event-exporter/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f..6a79cd8a3 100644 --- a/event-exporter/vendor/go.opencensus.io/stats/view/worker.go +++ b/event-exporter/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/event-exporter/vendor/go.opencensus.io/tag/profile_19.go b/event-exporter/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34..8fb17226f 100644 --- a/event-exporter/vendor/go.opencensus.io/tag/profile_19.go +++ b/event-exporter/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/event-exporter/vendor/go.opencensus.io/tag/profile_not19.go b/event-exporter/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56..e28cf13cd 100644 --- a/event-exporter/vendor/go.opencensus.io/tag/profile_not19.go +++ b/event-exporter/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/event-exporter/vendor/go.opencensus.io/trace/doc.go b/event-exporter/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f3..7a1616a55 100644 --- a/event-exporter/vendor/go.opencensus.io/trace/doc.go +++ b/event-exporter/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. 
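Because the worker's quit and done channels are now closed rather than sent on, the new exported view.Stop is idempotent: a second call takes the `case <-w.quit` branch and returns as soon as the closed done channel is drained. For example:

```go
package main

import (
	"time"

	"go.opencensus.io/stats/view"
)

func main() {
	view.SetReportingPeriod(10 * time.Second)

	// ... record measurements and export views for a while ...

	view.Stop()
	view.Stop() // safe: quit is already closed, so this no longer blocks or panics
}
```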
The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/event-exporter/vendor/go.opencensus.io/trace/lrumap.go b/event-exporter/vendor/go.opencensus.io/trace/lrumap.go index 908c2497e..80095a5f6 100644 --- a/event-exporter/vendor/go.opencensus.io/trace/lrumap.go +++ b/event-exporter/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/event-exporter/vendor/go.opencensus.io/trace/trace_go11.go b/event-exporter/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf28..b8fc1e495 100644 --- a/event-exporter/vendor/go.opencensus.io/trace/trace_go11.go +++ b/event-exporter/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/event-exporter/vendor/go.opencensus.io/trace/trace_nongo11.go b/event-exporter/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859..da488fc87 100644 --- a/event-exporter/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/event-exporter/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 000000000..92b8cf73c
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", contentType)
+	return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+	return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 000000000..c6f438774
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
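The client helpers in client.go above mirror net/http's package-level functions while routing through otelhttp's instrumented Transport, so each call gets a client span and context propagation. A sketch; the URL is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"io"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	ctx := context.Background()

	// Like http.Get, but the request runs through otelhttp's Transport.
	resp, err := otelhttp.Get(ctx, "https://example.com/") // illustrative URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println("read", len(body), "bytes")
}
```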
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. +const ( + // Deprecated: This field is unused. + RequestCount = "http.server.request_count" // Incoming request count total + // Deprecated: Use of this field has been migrated to serverRequestSize. It will be removed in a future version. + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + // Deprecated: Use of this field has been migrated to serverResponseSize. It will be removed in a future version. + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + // Deprecated: Use of this field has been migrated to serverDuration. It will be removed in a future version. + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 000000000..a1b5b5e5a --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+	ServerName        string
+	Tracer            trace.Tracer
+	Meter             metric.Meter
+	Propagators       propagation.TextMapPropagator
+	SpanStartOptions  []trace.SpanStartOption
+	PublicEndpoint    bool
+	PublicEndpointFn  func(*http.Request) bool
+	ReadEvent         bool
+	WriteEvent        bool
+	Filters           []Filter
+	SpanNameFormatter func(string, *http.Request) string
+	ClientTrace       func(context.Context) *httptrace.ClientTrace
+
+	TracerProvider trace.TracerProvider
+	MeterProvider  metric.MeterProvider
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+	apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+	o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+	c := &config{
+		Propagators:   otel.GetTextMapPropagator(),
+		MeterProvider: otel.GetMeterProvider(),
+	}
+	for _, opt := range opts {
+		opt.apply(c)
+	}
+
+	// Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
+	if c.TracerProvider != nil {
+		c.Tracer = newTracer(c.TracerProvider)
+	}
+
+	c.Meter = c.MeterProvider.Meter(
+		ScopeName,
+		metric.WithInstrumentationVersion(Version()),
+	)
+
+	return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.TracerProvider = provider
+		}
+	})
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.MeterProvider = provider
+		}
+	})
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpoint = true
+	})
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
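These functional options compose like the rest of the package. A sketch that drops health checks from tracing and renames spans after the request; the /healthz path is illustrative, and NewHandler itself is added later in this patch:

```go
package main

import (
	"log"
	"net/http"
	"strings"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func instrument(next http.Handler) http.Handler {
	return otelhttp.NewHandler(next, "server",
		// Skip tracing for health checks (illustrative path).
		otelhttp.WithFilter(func(r *http.Request) bool {
			return !strings.HasPrefix(r.URL.Path, "/healthz")
		}),
		// Name spans "GET /path" rather than the static operation name.
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			return r.Method + " " + r.URL.Path
		}),
	)
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", instrument(mux)))
}
```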
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpointFn = fn
+	})
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+	return optionFunc(func(c *config) {
+		if ps != nil {
+			c.Propagators = ps
+		}
+	})
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanStartOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+	return optionFunc(func(c *config) {
+		c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+	})
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+	return optionFunc(func(c *config) {
+		c.Filters = append(c.Filters, f)
+	})
+}
+
+type event int
+
+// Different types of events that can be recorded; see WithMessageEvents.
+const (
+	ReadEvents event = iota
+	WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default, only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+// using the ReadBytesKey
+// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+// using the WroteBytesKey
+func WithMessageEvents(events ...event) Option {
+	return optionFunc(func(c *config) {
+		for _, e := range events {
+			switch e {
+			case ReadEvents:
+				c.ReadEvent = true
+			case WriteEvents:
+				c.WriteEvent = true
+			}
+		}
+	})
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+	return optionFunc(func(c *config) {
+		c.SpanNameFormatter = f
+	})
+}
+
+// WithClientTrace takes a function that returns a client trace instance that will be
+// applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+	return optionFunc(func(c *config) {
+		c.ClientTrace = f
+	})
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+	return optionFunc(func(c *config) {
+		c.ServerName = server
+	})
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 000000000..38c7f01c7
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 000000000..3d292dab6
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,283 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/felixge/httpsnoop"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an http middleware which wraps the next handler in a span.
+type middleware struct {
+	operation string
+	server    string
+
+	tracer            trace.Tracer
+	meter             metric.Meter
+	propagators       propagation.TextMapPropagator
+	spanStartOptions  []trace.SpanStartOption
+	readEvent         bool
+	writeEvent        bool
+	filters           []Filter
+	spanNameFormatter func(string, *http.Request) string
+	publicEndpoint    bool
+	publicEndpointFn  func(*http.Request) bool
+
+	requestBytesCounter  metric.Int64Counter
+	responseBytesCounter metric.Int64Counter
+	serverLatencyMeasure metric.Float64Histogram
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+	return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+	return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
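+//
+// A minimal sketch of how the middleware composes (userHandler is
+// hypothetical):
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/users", NewMiddleware("users")(userHandler))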
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) + h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + var err error + h.requestBytesCounter, err = h.meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + h.responseBytesCounter, err = h.meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + h.serverLatencyMeasure, err = h.meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+	if r.Body != nil && r.Body != http.NoBody {
+		bw.ReadCloser = r.Body
+		bw.record = readRecordFunc
+		r.Body = &bw
+	}
+
+	writeRecordFunc := func(int64) {}
+	if h.writeEvent {
+		writeRecordFunc = func(n int64) {
+			span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+		}
+	}
+
+	rww := &respWriterWrapper{
+		ResponseWriter: w,
+		record:         writeRecordFunc,
+		ctx:            ctx,
+		props:          h.propagators,
+		statusCode:     http.StatusOK, // default status code in case the Handler doesn't write anything
+	}
+
+	// Wrap w to use our ResponseWriter methods while also exposing
+	// other interfaces that w may implement (http.CloseNotifier,
+	// http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+	w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+		Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+			return rww.Header
+		},
+		Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+			return rww.Write
+		},
+		WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+			return rww.WriteHeader
+		},
+	})
+
+	labeler := &Labeler{}
+	ctx = injectLabeler(ctx, labeler)
+
+	next.ServeHTTP(w, r.WithContext(ctx))
+
+	setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
+
+	// Add metrics
+	attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
+	if rww.statusCode > 0 {
+		attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+	}
+	o := metric.WithAttributes(attributes...)
+	h.requestBytesCounter.Add(ctx, bw.read, o)
+	h.responseBytesCounter.Add(ctx, rww.written, o)
+
+	// Use floating point division here for higher precision (instead of Millisecond method).
+	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+	h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+}
+
+func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
+	attributes := []attribute.KeyValue{}
+
+	// TODO: Consider adding an event after each read and write, possibly as an
+	// option (defaulting to off), so as to not create needlessly verbose spans.
+	if read > 0 {
+		attributes = append(attributes, ReadBytesKey.Int64(read))
+	}
+	if rerr != nil && rerr != io.EOF {
+		attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
+	}
+	if wrote > 0 {
+		attributes = append(attributes, WroteBytesKey.Int64(wrote))
+	}
+	if statusCode > 0 {
+		attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+	}
+	span.SetStatus(semconvutil.HTTPServerStatus(statusCode))
+
+	if werr != nil && werr != io.EOF {
+		attributes = append(attributes, WriteErrorKey.String(werr.Error()))
+	}
+	span.SetAttributes(attributes...)
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route attribute.
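+//
+// A minimal sketch (the route and usersHandler are illustrative):
+//
+//	mux.Handle("/users", WithRouteTag("/users", usersHandler))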
+func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 000000000..edf4ce3d3 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 000000000..495d700cf --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,587 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes HTTPClientRequest was used to create the span with a complete set
+// of attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	append(HTTPClientResponse(resp), HTTPClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "user_agent.original",
+// "http.client_ip", "net.protocol.name", "net.protocol.version".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
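+// Compared to HTTPServerRequest, it returns only the low-cardinality
+// attributes that are safe to use as metric dimensions.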
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+	NetConv *netConv
+
+	HTTPClientIPKey              attribute.Key
+	HTTPMethodKey                attribute.Key
+	HTTPRequestContentLengthKey  attribute.Key
+	HTTPResponseContentLengthKey attribute.Key
+	HTTPRouteKey                 attribute.Key
+	HTTPSchemeHTTP               attribute.KeyValue
+	HTTPSchemeHTTPS              attribute.KeyValue
+	HTTPStatusCodeKey            attribute.Key
+	HTTPTargetKey                attribute.Key
+	HTTPURLKey                   attribute.Key
+	UserAgentOriginalKey         attribute.Key
+}
+
+var hc = &httpConv{
+	NetConv: nc,
+
+	HTTPClientIPKey:              semconv.HTTPClientIPKey,
+	HTTPMethodKey:                semconv.HTTPMethodKey,
+	HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+	HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+	HTTPRouteKey:                 semconv.HTTPRouteKey,
+	HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+	HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+	HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+	HTTPTargetKey:                semconv.HTTPTargetKey,
+	HTTPURLKey:                   semconv.HTTPURLKey,
+	UserAgentOriginalKey:         semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.status_code                int
+	http.response_content_length    int
+	*/
+	var n int
+	if resp.StatusCode > 0 {
+		n++
+	}
+	if resp.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+	}
+	if resp.ContentLength > 0 {
+		attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+	}
+	return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method                     string
+	user_agent.original             string
+	http.url                        string
+	net.peer.name                   string
+	net.peer.port                   int
+	http.request_content_length     int
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response. See ClientResponse.
+	http.response_content_length    This requires the response. See ClientResponse.
+	net.sock.family                 This requires the socket used.
+	net.sock.peer.addr              This requires the socket used.
+	net.sock.peer.name              This requires the socket used.
+	net.sock.peer.port              This requires the socket used.
+	http.resend_count               This is something outside of a single request.
+	net.protocol.name               The value in the Request is ignored, and the Go client will always use "http".
+	net.protocol.version            The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
+	*/
+	n := 3 // URL, peer name, and method.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	return attrs
+}
+
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method        string
+	net.peer.name      string
+	net.peer.port      int
+	*/
+
+	n := 2 // method, peer name.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if the
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method             string
+	http.scheme             string
+	net.host.name           string
+	net.host.port           int
+	net.sock.peer.addr      string
+	net.sock.peer.port      int
+	user_agent.original     string
+	http.client_ip          string
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	http.target             string Note: doesn't include the query parameter.
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response.
+	http.request_content_length     This requires the len() of body, which can mutate it.
+	http.response_content_length    This requires the response.
+	http.route                      This is not available.
+	net.sock.peer.name              This would require a DNS lookup.
+	net.sock.host.addr              The request doesn't have access to the underlying socket.
+	net.sock.host.port              The request doesn't have access to the underlying socket.
+
+	*/
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
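+		// The request Host is consulted only for the port if the
+		// configured server name does not include one.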
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+
+	var target string
+	if req.URL != nil {
+		target = req.URL.Path
+		if target != "" {
+			n++
+		}
+	}
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		n++
+	}
+	if protoVersion != "" {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+		}
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+	}
+
+	if target != "" {
+		attrs = append(attrs, c.HTTPTargetKey.String(target))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+	}
+
+	return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.scheme             string
+	http.route              string
+	http.method             string
+	http.status_code        int
+	net.host.name           string
+	net.host.port           int
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	*/
+
+	n := 3 // Method, scheme, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
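+		// The request Host is consulted only for the port if the
+		// configured server name does not include one.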
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *httpConv) ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go new file mode 100644 index 000000000..d3a06e0ca --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -0,0 +1,215 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconvutil/netconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +// NetTransport returns a trace attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func NetTransport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// netConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. 
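+// For example, Host("example.com:8080") yields a net.host.name of
+// "example.com" and a net.host.port of 8080.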
+func (c *netConv) Host(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.HostName(h))
+	if p > 0 {
+		attrs = append(attrs, c.HostPort(int(p)))
+	}
+	return attrs
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+	return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+	return c.NetHostPortKey.Int(port)
+}
+
+func family(network, address string) string {
+	switch network {
+	case "unix", "unixgram", "unixpacket":
+		return "unix"
+	default:
+		if ip := net.ParseIP(address); ip != nil {
+			if ip.To4() == nil {
+				return "inet6"
+			}
+			return "inet"
+		}
+	}
+	return ""
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.PeerName(h))
+	if p > 0 {
+		attrs = append(attrs, c.PeerPort(int(p)))
+	}
+	return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+	return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+	return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+	return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+	return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p)
+}
+
+func netProtocol(proto string) (name string, version string) {
+	name, version, _ = strings.Cut(proto, "/")
+	name = strings.ToLower(name)
+	return name, version
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 000000000..26a51a180
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+	mu         sync.Mutex
+	attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	ret := make([]attribute.KeyValue, len(l.attributes))
+	copy(ret, l.attributes)
+	return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+	if !ok {
+		l = &Labeler{}
+	}
+	return l, ok
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 000000000..8d850df3b
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,286 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/http/httptrace" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel/metric" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +// Transport implements the http.RoundTripper interface and wraps +// outbound HTTP(S) requests with a span and enriches it with metrics. +type Transport struct { + rt http.RoundTripper + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +} + +var _ http.RoundTripper = &Transport{} + +// NewTransport wraps the provided http.RoundTripper with one that +// starts a span, injects the span context into the outbound request headers, +// and enriches it with metrics. +// +// If the provided http.RoundTripper is nil, http.DefaultTransport will be used +// as the base http.RoundTripper. +func NewTransport(base http.RoundTripper, opts ...Option) *Transport { + if base == nil { + base = http.DefaultTransport + } + + t := Transport{ + rt: base, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) + t.applyConfig(c) + t.createMeasures() + + return &t +} + +func (t *Transport) applyConfig(c *config) { + t.tracer = c.Tracer + t.meter = c.Meter + t.propagators = c.Propagators + t.spanStartOptions = c.SpanStartOptions + t.filters = c.Filters + t.spanNameFormatter = c.SpanNameFormatter + t.clientTrace = c.ClientTrace +} + +func (t *Transport) createMeasures() { + var err error + t.requestBytesCounter, err = t.meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + t.responseBytesCounter, err = t.meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + t.latencyMeasure, err = t.meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) +} + +func defaultTransportFormatter(_ string, r *http.Request) string { + return "HTTP " + r.Method +} + +// RoundTrip creates a Span and propagates its context via the provided request's headers +// before handing the request to the configured base RoundTripper. The created span will +// end when the response body is closed or when a read from the body returns io.EOF. 
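+//
+// A minimal client-side sketch (the URL is illustrative):
+//
+//	client := http.Client{Transport: NewTransport(http.DefaultTransport)}
+//	resp, err := client.Get("https://example.com/")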
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+	requestStartTime := time.Now()
+	for _, f := range t.filters {
+		if !f(r) {
+			// Simply pass through to the base RoundTripper if a filter rejects the request
+			return t.rt.RoundTrip(r)
+		}
+	}
+
+	tracer := t.tracer
+
+	if tracer == nil {
+		if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+			tracer = newTracer(span.TracerProvider())
+		} else {
+			tracer = newTracer(otel.GetTracerProvider())
+		}
+	}
+
+	opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+	ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+	if t.clientTrace != nil {
+		ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+	}
+
+	labeler := &Labeler{}
+	ctx = injectLabeler(ctx, labeler)
+
+	r = r.Clone(ctx) // According to the RoundTripper spec, we shouldn't modify the original request.
+
+	// use a body wrapper to determine the request size
+	var bw bodyWrapper
+	// if request body is nil or NoBody, we don't want to mutate the body as it
+	// will affect the identity of it in an unforeseeable way because we assert
+	// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+	if r.Body != nil && r.Body != http.NoBody {
+		bw.ReadCloser = r.Body
+		// No-op to prevent a nil panic; this record function is not used yet.
+		bw.record = func(int64) {}
+		r.Body = &bw
+	}
+
+	span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+	t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+	res, err := t.rt.RoundTrip(r)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+		span.End()
+		return res, err
+	}
+
+	// metrics
+	metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+	if res.StatusCode > 0 {
+		metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
+	}
+	o := metric.WithAttributes(metricAttrs...)
+	t.requestBytesCounter.Add(ctx, bw.read, o)
+	// For handling response bytes we leverage a callback when the client reads the http response
+	readRecordFunc := func(n int64) {
+		t.responseBytesCounter.Add(ctx, n, o)
+	}
+
+	// traces
+	span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+	span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+
+	res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+
+	// Use floating point division here for higher precision (instead of Millisecond method).
+	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+	t.latencyMeasure.Record(ctx, elapsedTime, o)
+
+	return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that
+	// implements an io.ReadWriteCloser. Ensure this interface type continues
+	// to be satisfied if that is the case.
+	if _, ok := body.(io.ReadWriteCloser); ok {
+		return &wrappedBody{span: span, record: record, body: body}
+	}
+
+	// Remove the implementation of the io.ReadWriteCloser and only implement
+	// the io.ReadCloser.
+	return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+	span     trace.Span
+	recorded atomic.Bool
+	record   func(n int64)
+	body     io.ReadCloser
+	read     atomic.Int64
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+	// This will not panic given the guard in newWrappedBody.
+	n, err := wb.body.(io.Writer).Write(p)
+	if err != nil {
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+	n, err := wb.body.Read(b)
+	// Record the number of bytes read
+	wb.read.Add(int64(n))
+
+	switch err {
+	case nil:
+		// nothing to do here but fall through to the return
+	case io.EOF:
+		wb.recordBytesRead()
+		wb.span.End()
+	default:
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once.
+func (wb *wrappedBody) recordBytesRead() {
+	// note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
+	// two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
+	// CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
+	// calling wb.record(wb.read.Load()).
+	if wb.recorded.CompareAndSwap(false, true) {
+		// Record the total number of bytes read
+		wb.record(wb.read.Load())
+	}
+}
+
+func (wb *wrappedBody) Close() error {
+	wb.recordBytesRead()
+	wb.span.End()
+	if wb.body != nil {
+		return wb.body.Close()
+	}
+	return nil
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 000000000..7499f688b
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+	return "0.48.0"
+	// This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string { + return Version() +} diff --git a/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go new file mode 100644 index 000000000..11a35ed16 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + + "go.opentelemetry.io/otel/propagation" +) + +var _ io.ReadCloser = &bodyWrapper{} + +// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type bodyWrapper struct { + io.ReadCloser + record func(n int64) // must not be nil + + read int64 + err error +} + +func (w *bodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + w.read += n1 + w.err = err + w.record(n1) + return n, err +} + +func (w *bodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +var _ http.ResponseWriter = &respWriterWrapper{} + +// respWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) +// that may be useful when using it in real life situations. +type respWriterWrapper struct { + http.ResponseWriter + record func(n int64) // must not be nil + + // used to inject the header + ctx context.Context + + props propagation.TextMapPropagator + + written int64 + statusCode int + err error + wroteHeader bool +} + +func (w *respWriterWrapper) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *respWriterWrapper) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.record(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. 
+func (w *respWriterWrapper) WriteHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +}
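The `recordBytesRead` comment in the wrapped-body code above argues for `atomic.Bool.CompareAndSwap` over `sync.Once`: a losing goroutine returns immediately instead of blocking on the race winner. A minimal standalone sketch of that record-once pattern (not part of this patch; all names here are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// byteCounter mirrors the relevant fields of wrappedBody above.
type byteCounter struct {
	recorded atomic.Bool
	read     atomic.Int64
}

// recordOnce invokes record with the running total exactly once, even if
// several goroutines race to call it; losers return without blocking.
func (c *byteCounter) recordOnce(record func(int64)) {
	if c.recorded.CompareAndSwap(false, true) {
		record(c.read.Load())
	}
}

func main() {
	c := &byteCounter{}
	c.read.Add(42)
	c.recordOnce(func(n int64) { fmt.Println("bytes read:", n) }) // prints once
	c.recordOnce(func(n int64) { fmt.Println("never reached") })  // no-op
}
```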
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.codespellignore b/event-exporter/vendor/go.opentelemetry.io/otel/.codespellignore new file mode 100644 index 000000000..ae6a3bcf1 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.codespellignore @@ -0,0 +1,5 @@ +ot +fo +te +collison +consequentially diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.codespellrc b/event-exporter/vendor/go.opentelemetry.io/otel/.codespellrc new file mode 100644 index 000000000..4afbb1fb3 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.codespellrc @@ -0,0 +1,10 @@ +# https://github.com/codespell-project/codespell +[codespell] +builtin = clear,rare,informal +check-filenames = +check-hidden = +ignore-words = .codespellignore +interactive = 1 +skip = .git,go.mod,go.sum,semconv,venv,.tools +uri-ignore-words-list = * +write = diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.gitattributes b/event-exporter/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 000000000..314766e91 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.gitignore b/event-exporter/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 000000000..895c7664b --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,22 @@ +.DS_Store +Thumbs.db + +.tools/ +venv/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* +go.work +go.work.sum + +gen/ + +/example/dice/dice +/example/namedtracer/namedtracer +/example/otel-collector/otel-collector +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.gitmodules b/event-exporter/vendor/go.opentelemetry.io/otel/.gitmodules new file mode 100644 index 000000000..38a1f5698 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.gitmodules @@ -0,0 +1,3 @@ +[submodule "opentelemetry-proto"] + path = exporters/otlp/internal/opentelemetry-proto + url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.golangci.yml b/event-exporter/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 000000000..a62511f38 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,296 @@ +# See https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades do not include new "default + # enabled" linters. + disable-all: true + # Specifically enable linters we want to use. + enable: + - depguard + - errcheck + - godot + - gofumpt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter is run only once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter is run only once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. + - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + rules: + non-tests: + files: + - "!$test" + - "!**/*test/*.go" + - "!**/internal/matchers/*.go" + deny: + - pkg: "testing" + - pkg: "github.com/stretchr/testify" + - pkg: "crypto/md5" + - pkg: "crypto/sha1" + - pkg: "crypto/**/pkix" + otlp-internal: + files: + - "!**/exporters/otlp/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. + otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list.
+ - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.lycheeignore b/event-exporter/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/event-exporter/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header 
+MD024: + siblings_only: true + +# single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/event-exporter/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..ea1f723e2 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2909 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.23.0] 2024-02-06 + +This release contains the first stable, `v1`, release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808) +- Experimental exemplar exporting is added to the metric SDK. + See [metric documentation](./sdk/metric/EXPERIMENTAL.md#exemplars) for more information about this feature and how to enable it. (#4871) +- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`. + This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876) + +### Changed + +- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict. + Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource` is returned along with the new `ErrSchemaURLConflict` error. + It is up to the user to decide if they want to use the returned `Resource` or not. + It may have desired attributes overwritten or include stale semantic conventions. (#4876) (See the sketch after this section.) + +### Fixed + +- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449) +- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820) +- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
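A minimal sketch of the `Merge` behavior described in the [1.23.0] notes above, assuming only the stated `go.opentelemetry.io/otel/sdk/resource` API; the schema URLs and attribute values are placeholders:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Two resources with different, non-empty schema URLs (placeholders).
	a := resource.NewWithAttributes("https://example.com/schemas/1.0",
		attribute.String("service.name", "svc"))
	b := resource.NewWithAttributes("https://example.com/schemas/2.0",
		attribute.String("host.name", "host-1"))

	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		// The partial result is still returned; the caller decides whether
		// overwritten attributes or stale conventions are acceptable.
		fmt.Println("schema conflict, partial merge:", merged)
	} else if err != nil {
		panic(err)
	}
}
```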
+## [1.23.0-rc.1] 2024-01-18 + +This is a release candidate for the v1.23.0 release. +That release is expected to include the `v1` release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +## [1.22.0/0.45.0] 2024-01-17 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.22.0` package. + The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735) +- The `go.opentelemetry.io/otel/semconv/v1.23.0` package. + The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746) +- The `go.opentelemetry.io/otel/semconv/v1.23.1` package. + The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749) +- The `go.opentelemetry.io/otel/semconv/v1.24.0` package. + The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770) +- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733) +- Experimental cardinality limiting is added to the metric SDK. + See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) +- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804) (See the sketch after this section.) + +### Changed + +- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754) +- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754) +- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`. + If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671) +- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) +- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721) +- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743) +- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774) +- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775) +- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818) +- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804) + +### Fixed + +- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755) +- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756) +- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772) +- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776) +- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804) +- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
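A short sketch of the raw baggage constructor added in [1.22.0] above, assuming the documented `go.opentelemetry.io/otel/baggage` API; the key and value are placeholders:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw takes the value as-is; Member.String percent-encodes
	// only where the W3C Baggage syntax requires it.
	m, err := baggage.NewMemberRaw("user.id", "42 alpha")
	if err != nil {
		panic(err)
	}
	bag, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(bag.String()) // e.g. user.id=42%20alpha
}
```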
+## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) (See the sketch after this list.) +- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
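A sketch of the migration suggested by the deprecation notes above, replacing `trace.NewNoopTracerProvider` with the `trace/noop` package; the tracer name is a placeholder:

```go
package main

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// Before (deprecated): tp := trace.NewNoopTracerProvider()
	var tp trace.TracerProvider = noop.NewTracerProvider()
	_ = tp.Tracer("example/tracer")
}
```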
+### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) (See the sketch after this list.) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
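A sketch of the embedding requirement described in the Changed entries above: a custom implementation embeds the `trace/embedded` type to opt in to default behavior for future interface methods. The `provider` type and its delegation to the no-op tracer are illustrative, not part of this patch:

```go
package main

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// provider embeds embedded.TracerProvider, as v1.20.0 requires of
// custom trace API implementations.
type provider struct {
	embedded.TracerProvider
}

// Tracer delegates to the no-op implementation for this sketch.
func (provider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return noop.NewTracerProvider().Tracer(name, opts...)
}

var _ trace.TracerProvider = provider{}

func main() {
	_ = provider{}.Tracer("example/tracer")
}
```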
+### Fixed + +- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as a whitespace. (#4699) +- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) + +## [1.19.0/0.42.0/0.0.7] 2023-09-28 + +This release contains the first stable release of the OpenTelemetry Go [metric SDK]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) +- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) +- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` no longer prettifies its output by default. (#4507) +- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) + +### Fixed + +- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +### Removed + +- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) + +## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 + +This is a release candidate for the v1.19.0/v0.42.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) + +### Fixed + +- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +## [1.18.0/0.41.0/0.0.6] 2023-09-12 + +This release drops the compatibility guarantee of [Go 1.19]. + +### Added + +- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473) +- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics.
(#4447) + +### Changed + +- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). + The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) + +### Removed + +- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) +- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) +- Dropped guaranteed support for versions of Go less than 1.20. (#4481) + +## [1.17.0/0.40.0/0.0.5] 2023-08-28 + +### Added + +- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Add support for exponential histogram aggregations. + A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) +- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) +- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) +- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) +- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. + The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) +- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) +- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381) +- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374) +- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) +- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437) +- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) (See the sketch after this list.) +- Support Go 1.21. (#4463)
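A sketch of the allow-keys filter added in [1.17.0] above, using the documented `go.opentelemetry.io/otel/attribute` API; the attribute names are placeholders:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("http.method", "GET"),
		attribute.String("http.url", "https://example.com/?token=secret"),
	)
	// Keep only explicitly allowed keys; everything else is dropped.
	kept, dropped := set.Filter(attribute.NewAllowKeysFilter("http.method"))
	fmt.Println(kept.Len(), len(dropped)) // 1 1
}
```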
+### Changed + +- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) +- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) +- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) +- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) +- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) +- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) +- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) +- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) +- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) +- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) +- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) +- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) +- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) +- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) +- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) +- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346) + +### Removed + +- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`. + Use the added `WithProducer` option instead. (#4346) +- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`. + Notice that `PeriodicReader.ForceFlush` is still available. (#4375) + +### Fixed + +- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) +- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) +- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317) +- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337) +- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) +- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) +- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) +- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`.
(#4349) +- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) +- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846) +- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395) +- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373) +- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) +- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428) + +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. + OpenTelemetry dropped support for Jaeger exporter in July 2023. + Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` + or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) (See the sketch after this list.) +- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. + Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
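A sketch of the Jaeger-to-OTLP migration the deprecation above points at, assuming the `otlptracehttp` exporter's documented constructor and its default endpoint (localhost:4318):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()
	// Replaces the removed Jaeger exporter; options such as
	// otlptracehttp.WithEndpoint can override the default target.
	exp, err := otlptracehttp.New(ctx)
	if err != nil {
		panic(err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
}
```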
+## [1.16.0/0.39.0] 2023-05-18 + +This release contains the first stable release of the OpenTelemetry Go [metric API]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. + The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) +- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. + The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) +- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) +- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) +- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) + +### Changed + +- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049) +- `MeterProvider` returns noop meters once it has been shut down. (#4154) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. + Use `go.opentelemetry.io/otel/metric` instead. (#4055) + +### Fixed + +- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) + +## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 + +This is a release candidate for the v1.16.0/v0.39.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) (See the sketch below.) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProvider` for a global `metric.MeterProvider`. + - Use `SetMeterProvider` to set the global `metric.MeterProvider`. + +### Changed + +- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. + This stages the metric API to be released as a stable module. (#4038) + +### Removed + +- The `go.opentelemetry.io/otel/metric/global` package is removed. + Use `go.opentelemetry.io/otel` instead. (#4039) + +## [1.15.1/0.38.1] 2023-05-02 + +### Fixed + +- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
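A sketch of the global `MeterProvider` support noted in the [1.16.0-rc.1] entries above; the no-op provider stands in for a real SDK provider, and the meter name is a placeholder:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Register a provider once, then obtain meters anywhere in the process.
	otel.SetMeterProvider(noop.NewMeterProvider())
	meter := otel.Meter("example/meter")
	_, _ = meter, otel.GetMeterProvider()
}
```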
+## [1.15.0/0.38.0] 2023-04-27 + +### Added + +- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) +- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) +- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) +- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) + - The `AddConfig` used to hold configuration for addition measurements + - `NewAddConfig` used to create a new `AddConfig` + - `AddOption` used to configure an `AddConfig` + - The `RecordConfig` used to hold configuration for recorded measurements + - `NewRecordConfig` used to create a new `RecordConfig` + - `RecordOption` used to configure a `RecordConfig` + - The `ObserveConfig` used to hold configuration for observed measurements + - `NewObserveConfig` used to create a new `ObserveConfig` + - `ObserveOption` used to configure an `ObserveConfig` +- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. + They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) + +### Changed + +- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) +- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. + This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) +- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) + - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` +- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) +- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve the error message when encountering generic gRPC errors. (#3974) +- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Int64Counter.Add` method now accepts `...AddOption` + - The `Float64Counter.Add` method now accepts `...AddOption` + - The `Int64UpDownCounter.Add` method now accepts `...AddOption` + - The `Float64UpDownCounter.Add` method now accepts `...AddOption` + - The `Int64Histogram.Record` method now accepts `...RecordOption` + - The `Float64Histogram.Record` method now accepts `...RecordOption` + - The `Int64Observer.Observe` method now accepts `...ObserveOption` + - The `Float64Observer.Observe` method now accepts `...ObserveOption` +- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Observer.ObserveInt64` method now accepts `...ObserveOption` + - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` +- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) + +### Fixed + +- `TracerProvider` allows calling `Tracer()` while it's shutting down. + It used to deadlock. (#3924) +- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) +- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`.
(#3951) +- Automatically figure out the default aggregation with `aggregation.Default`. (#3967) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. + Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) + +## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) (See the sketch after this section.) +- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828) +- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) +- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) +- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) +- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) + +### Changed + +- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) +- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) +- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) +- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844) +- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) +- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) +- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) +- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) + +### Fixed + +- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) +- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) +- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `float64` instrument configuration instead. (#3895) +- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `int64` instrument configuration instead. (#3895) +- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
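A sketch of the `WithHostID` detector option added in [1.15.0-rc.2] above, assuming the documented `go.opentelemetry.io/otel/sdk/resource` API:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithHostID enables host.id detection on supported platforms.
	res, err := resource.New(context.Background(), resource.WithHostID())
	if err != nil {
		panic(err)
	}
	fmt.Println(res)
}
```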
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +This release drops the compatibility guarantee of [Go 1.18]. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProvider` for a global `metric.MeterProvider`. + - Use `SetMeterProvider` to set the global `metric.MeterProvider`. + +### Changed + +- Dropped compatibility testing for [Go 1.18]. + The project no longer guarantees support for this version of Go. (#3813) + +### Fixed + +- Handle empty environment variables as if they were not set. (#3764) +- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) +- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899) +- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/global` package is deprecated. + Use `go.opentelemetry.io/otel` instead. (#3818) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) + +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) (See the sketch after this list.) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout for each export attempt.
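A sketch of the scope-level instrumentation attributes from the [1.14.0] Added list above, using the documented trace API; the tracer name and attribute are placeholders:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Every span from this tracer carries the scope attribute below.
	tracer := otel.Tracer("example/tracer",
		trace.WithInstrumentationAttributes(attribute.String("component", "checkout")),
	)
	_ = tracer
}
```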
+### Changed + +- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. (#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This option is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This option is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification.
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed; a sketch follows this section.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
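+
+As an illustration (not from the upstream changelog), a callback registration written against the shape this API eventually stabilized on; the meter, instrument name, and observed value are placeholder assumptions:
+
+```go
+gauge, _ := meter.Int64ObservableGauge("queue.length")
+reg, _ := meter.RegisterCallback(
+	func(ctx context.Context, o metric.Observer) error {
+		// Observe through the Observer instead of calling the instrument directly.
+		o.ObserveInt64(gauge, 42)
+		return nil
+	},
+	gauge, // instruments are passed variadically
+)
+defer reg.Unregister()
+```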
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`; a usage sketch follows the Changed section below. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The added `AssertHasAttributes` function allows instrument authors to test that returned datapoints have the appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
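+
+As an illustration (not from the upstream changelog), a sketch of declaring and registering a view; the instrument and stream names, and the `reader`, are placeholder assumptions:
+
+```go
+view := sdkmetric.NewView(
+	sdkmetric.Instrument{Name: "request.duration"},  // match criteria
+	sdkmetric.Stream{Name: "http.request.duration"}, // produced stream
+)
+provider := sdkmetric.NewMeterProvider(
+	sdkmetric.WithReader(reader), // any configured Reader
+	sdkmetric.WithView(view),     // applies to all Readers
+)
+```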
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent a panic when an incorrect `attribute.Value.As[Type]Slice()` is used. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option; a sketch follows this release's notes. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
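+
+As an illustration (not from the upstream changelog), a sketch of wiring the exporter to a non-default registry; the variable names are placeholder assumptions:
+
+```go
+registry := prometheus.NewRegistry() // github.com/prometheus/client_golang/prometheus
+exporter, err := otelprom.New(       // go.opentelemetry.io/otel/exporters/prometheus
+	otelprom.WithRegisterer(registry),
+)
+if err != nil {
+	panic(err)
+}
+// The exporter acts as a Reader for the MeterProvider.
+provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
+```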
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18; support for go1.17 is removed. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler; a sketch follows this list. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
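+
+As an illustration (not from the upstream changelog), a handler that would receive such partial-success errors can be registered globally; the logging choice is a placeholder assumption:
+
+```go
+otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+	// Partial-success responses surface here as errors describing
+	// the rejected spans or data points.
+	log.Printf("opentelemetry: %v", err)
+}))
+```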
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for the `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the `Exporter` configuration in the `TracerProvider` message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero effectively disables the related resource, and setting a negative value means that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly; a usage sketch follows the Fixed section below. (#2637)
+
+### Changed
+
+- Drop the oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` clients. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
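+
+As an illustration (not from the upstream changelog), a sketch of the replacement `WithRawSpanLimits` option described above; the chosen limit value is a placeholder assumption:
+
+```go
+limits := sdktrace.NewSpanLimits() // start from the defaults
+limits.AttributeValueLengthLimit = 128
+tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+```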
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- The Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only had a `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding them to the baggage itself. (#2522)
+- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to comply with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback on the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs; a configuration sketch follows this list. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
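+
+As an illustration (not from the upstream changelog), in recent releases the internal logger is configured through `otel.SetLogger` with any `logr` implementation, such as `github.com/go-logr/stdr`; this particular pairing is an assumption, not the only option:
+
+```go
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/stdr"
+	"go.opentelemetry.io/otel"
+)
+
+func init() {
+	stdr.SetVerbosity(5) // 1 for verbose logs, 5 for debugging
+	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
+}
+```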
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- The basic metric controller is updated to use `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- The metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- The metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively, to keep in line with the current specification and protocol, along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification; a usage sketch follows this release's notes. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
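+
+As an illustration (not from the upstream changelog), the versioned semconv packages are typically consumed when building a `Resource`; the service name is a placeholder assumption:
+
+```go
+import (
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+)
+
+func newResource() *resource.Resource {
+	return resource.NewWithAttributes(
+		semconv.SchemaURL, // schema URL matching the conventions' version
+		semconv.ServiceNameKey.String("checkout-service"),
+	)
+}
+```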
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a noop `MeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer was created with the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions; a sketch follows the Changed section below. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- The Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier, the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after one another is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
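+
+As an illustration (not from the upstream changelog), a sketch of the typed slice helpers that replace the old `Array` calls; the keys and values are placeholder assumptions:
+
+```go
+span.SetAttributes(
+	attribute.StringSlice("rpc.args", []string{"a", "b"}),
+	attribute.Int64Slice("batch.sizes", []int64{10, 20}),
+)
+```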
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added the `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added the `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otltrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the API `LinkFromContext` to return a Link which encapsulates the SpanContext from the provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions.
+The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`.
+Modules at major version 1 or greater will not depend on modules with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1  | Cancelled |
+    | 4  | Deadline Exceeded |
+    | 8  | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions; a sketch follows this list. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
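+
+As an illustration (not from the upstream changelog), a sketch of how the new baggage types compose; the member key and value are placeholder assumptions:
+
+```go
+member, _ := baggage.NewMember("user.id", "42")
+bag, _ := baggage.New(member)
+ctx := baggage.ContextWithBaggage(context.Background(), bag)
+_ = baggage.FromContext(ctx).Member("user.id").Value() // "42"
+```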
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+ `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without built-in detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when its `ForceFlush()` method is called. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+ This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+ This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` so that it now only accepts Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+ This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+ The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated the `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
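+  A rough sketch (not from the original notes) of the string-based `TraceState` API described by this and the following #1931 entries:
+
+  ```go
+  package main
+
+  import (
+      "encoding/json"
+      "fmt"
+      "log"
+
+      "go.opentelemetry.io/otel/trace"
+  )
+
+  func main() {
+      // Decode a TraceState from a tracestate header value.
+      ts, err := trace.ParseTraceState("vendor1=a,vendor2=b")
+      if err != nil {
+          log.Fatal(err)
+      }
+      ts, _ = ts.Insert("vendor3", "c") // keys and values are plain strings now
+      ts = ts.Delete("vendor2")         // Delete no longer returns an error
+      fmt.Println(ts.Get("vendor1"), ts.Len())
+
+      b, _ := json.Marshal(ts) // marshals to the tracestate string form
+      fmt.Println(string(b))
+  }
+  ```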
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated the `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated the `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated the `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to enable them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` interface in the `go.opentelemetry.io/otel/sdk/trace` package.
+ The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+ The `IsRecording` method returns whether the span is recording.
+ A read-only span value does not need to know if updates to it will be recorded or not.
+ By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+ The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+ When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+ Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+ The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+ - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+ These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed to fix a Jaeger exporter test panic caused by closing an already-closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, that set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+ These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it; see the sketch below. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
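+  The `trace.TraceFlags` sketch referenced above (#1770); illustrative only, not taken from the original notes:
+
+  ```go
+  package main
+
+  import (
+      "fmt"
+
+      "go.opentelemetry.io/otel/trace"
+  )
+
+  func main() {
+      var flags trace.TraceFlags      // defined over byte; the zero value is unsampled
+      flags = flags.WithSampled(true) // returns a copy with the sampled bit set
+      fmt.Println(flags.IsSampled())  // true
+  }
+  ```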
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added the `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+ - `process.pid`
+ - `process.executable.name`
+ - `process.executable.path`
+ - `process.command_args`
+ - `process.owner`
+ - `process.runtime.name`
+ - `process.runtime.version`
+ - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769, and #1811)
+ - `OTEL_EXPORTER_OTLP_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_HEADERS`
+ - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+ - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+ - `OTEL_EXPORTER_OTLP_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds the `otlpgrpc.WithTimeout` option for configuring a timeout for the otlp/gRPC exporter. (#1821)
+- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+ This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event names using the `"event"` key for a tag.
+ Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky reconnection of the OTLP client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+ Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+ The Span's SpanContext can now self-identify as being remote or not.
+ This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+ The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+ This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+ to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+ It is no longer a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's service name instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+ This enables the caller to shut down and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+ The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+ These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+ This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+ To prevent backwards-incompatible changes when it is specified, these links are removed. (#1726)
+- Setting of an error status while recording an error with a `Span` from the `oteltest` package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+ Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+ Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+ If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+ This field is redundant with the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove the `Process` configuration, the `WithProcessFromEnv` and `ProcessFromEnv` functions, and the type from the Jaeger exporter package.
+ The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+ To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+ These functions for retrieving specific environment variable values are redundant with other internal functions and
+ are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+ This option was used to set SDK options for the exporter creation convenience functions.
+ These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+ If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+ The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+ The type is no longer used by the exporter to configure anything.
+ All the previous configurations these options provided were duplicates of SDK configuration.
+ They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added the `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
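+  A minimal sketch (not from the original notes) of the new `Valid` method:
+
+  ```go
+  package main
+
+  import (
+      "fmt"
+
+      "go.opentelemetry.io/otel/attribute"
+  )
+
+  func main() {
+      kv := attribute.String("service.name", "demo")
+      fmt.Println(kv.Valid())                   // true
+      fmt.Println(attribute.KeyValue{}.Valid()) // false: empty key, invalid value
+  }
+  ```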
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+ - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added a non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of the `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of the `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549)
+- Removed `WithConfig` from the tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+ These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed the `jaeger.WithProcess` configuration option. (#1673)
+- Removed the `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set the span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in the global trace delegate implementation. (#1686)
+- Reduced excess memory usage by the global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added the `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+ | OS | Go Version | Architecture |
+ | ------- | ---------- | ------------ |
+ | Ubuntu | 1.15 | amd64 |
+ | Ubuntu | 1.14 | amd64 |
+ | Ubuntu | 1.15 | 386 |
+ | Ubuntu | 1.14 | 386 |
+ | MacOS | 1.15 | amd64 |
+ | MacOS | 1.14 | amd64 |
+ | Windows | 1.15 | amd64 |
+ | Windows | 1.14 | amd64 |
+ | Windows | 1.15 | 386 |
+ | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+ `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed the attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+ This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now explicitly set up to be sequential (#1571). (#1572)
+- The Windows build of the Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now explicitly set up to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename the project default branch from `master` to `main`. (#1505)
+- Reverse the order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
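+  A rough sketch (not from the original notes) of a `SpanProcessor` built on these interfaces, written with today's `go.opentelemetry.io/otel/sdk/trace` signatures:
+
+  ```go
+  package main
+
+  import (
+      "context"
+
+      "go.opentelemetry.io/otel/attribute"
+      sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  // annotator may mutate spans on start (ReadWriteSpan) but can only
+  // inspect them on end (ReadOnlySpan).
+  type annotator struct{}
+
+  func (annotator) OnStart(parent context.Context, s sdktrace.ReadWriteSpan) {
+      s.SetAttributes(attribute.Bool("demo.annotated", true))
+  }
+  func (annotator) OnEnd(s sdktrace.ReadOnlySpan)        { _ = s.Name() }
+  func (annotator) Shutdown(ctx context.Context) error   { return nil }
+  func (annotator) ForceFlush(ctx context.Context) error { return nil }
+
+  func main() {
+      tp := sdktrace.NewTracerProvider()
+      tp.RegisterSpanProcessor(annotator{})
+      defer func() { _ = tp.Shutdown(context.Background()) }()
+  }
+  ```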
+- The `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for the OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added a codeql workflow to GitHub Actions. (#1428)
+- Added a Gosec workflow to GitHub Actions. (#1429)
+- Add a new HTTP driver for the OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions. (#1382)
+- Remove duplicate checkout from the GitHub Actions workflow. (#1407)
+- The Metric `array` aggregator was renamed `exact` to match its `aggregation.Kind`. (#1412)
+- The Metric `exact` aggregator includes per-point timestamps. (#1412)
+- The Metric stdout exporter uses the MinMaxSumCount aggregator for ValueRecorder instruments. (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize the metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- The Histogram aggregator accepts functional options and uses default boundaries if none are given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
+- Moved the gRPC driver for the OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+ - `WithExporter()` and `Start()` to configure Push behavior
+ - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+ - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412)
+- Remove the DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released. (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the span's error status. (#1328)
+- The `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into the `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4. (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools`. (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to the `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+ - `ID` has been renamed to `TraceID`.
+ - `IDFromHex` has been renamed to `TraceIDFromHex`.
+ - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+ - supports OTLP v0.6.0 (#1230, #1354)
+ - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+ This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move the test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
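+  A minimal sketch (not from the original notes) of the relocated global API after this move:
+
+  ```go
+  package main
+
+  import (
+      "context"
+
+      "go.opentelemetry.io/otel"
+      sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  func main() {
+      // What used to live under api/global is now on the otel package itself.
+      tp := sdktrace.NewTracerProvider()
+      otel.SetTracerProvider(tp)
+
+      tracer := otel.Tracer("example/instrumentation")
+      _, span := tracer.Start(context.Background(), "operation")
+      span.End()
+
+      _ = tp.Shutdown(context.Background())
+  }
+  ```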
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename the correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support built-in attributes and functional options, including `telemetry.sdk.*` and
+ `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+ It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+ `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- The `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix the global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for the `ExactKind` aggregator in the OTLP metrics transformer. (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add a Shutdown method to sdk/trace/provider; shutdown processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set the default propagator to a no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with the unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+ They now are `Unset`, `Error`, and `Ok`.
+ They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+ The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove the duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+ This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add a reconnecting UDP connection type to the Jaeger exporter.
+ This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+ It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+ This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+ This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+ This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in the OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move the `tools` package under `internal`. (#1141)
+- Move the `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+ The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename the `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on the parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+ recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+ Style Guide to reflect this styling rule. (#1172)
+- Don't consider an unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+ `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use the LastValue aggregator by default. (#1165)
+- The OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename the `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename the `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with the OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add a test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+ `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey`, from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- The Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix the missing shutdown processor in the otel-collector example. (#1186)
+- Fix the missing shutdown processor in the basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change the Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+ This removes the package dependencies of this bridge from the rest of the OpenTelemetry-based project. (#1038)
+- Renamed the `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` packages into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+ Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against the last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface.
+ This interface still supports the export of `SpanData`, but only as a slice.
+ Implementations are also now required to return any error from `ExportSpans` if one occurs, as well as implement a `Shutdown` method for exporter clean-up. (#1078)
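+  A rough sketch (not from the original notes) of an exporter satisfying that contract, using today's `sdktrace.SpanExporter` names rather than the `SpanData` slice of this era:
+
+  ```go
+  package main
+
+  import (
+      "context"
+      "fmt"
+
+      sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  // countingExporter receives spans as a slice and supports clean shutdown.
+  type countingExporter struct{ exported int }
+
+  func (e *countingExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+      e.exported += len(spans)
+      return nil // any export failure must be returned to the caller
+  }
+
+  func (e *countingExporter) Shutdown(ctx context.Context) error {
+      fmt.Println("exported", e.exported, "spans")
+      return nil
+  }
+
+  func main() {
+      tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(&countingExporter{}))
+      _, span := tp.Tracer("example").Start(context.Background(), "op")
+      span.End()
+      _ = tp.Shutdown(context.Background())
+  }
+  ```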
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+ If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+ This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+ Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+ This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+ The functionality this method provided was limited compared to what a user can provide themselves.
+ It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+ These were holdovers from an approach prior to the TracerProvider design. They were no longer used. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct the instrumentation version tag in the Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+ These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944)
+- Add a propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+ This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+ This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+ - `value.Bool` was replaced with `kv.BoolValue`.
+ - `value.Int64` was replaced with `kv.Int64Value`.
+ - `value.Uint64` was replaced with `kv.Uint64Value`.
+ - `value.Float64` was replaced with `kv.Float64Value`.
+ - `value.Int32` was replaced with `kv.Int32Value`.
+ - `value.Uint32` was replaced with `kv.Uint32Value`.
+ - `value.Float32` was replaced with `kv.Float32Value`.
+ - `value.String` was replaced with `kv.StringValue`.
+ - `value.Int` was replaced with `kv.IntValue`.
+ - `value.Uint` was replaced with `kv.UintValue`.
+ - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename the `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+ References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed the dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+ Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add the `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic conventions for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in the HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+ This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+ If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+ Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+ This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+ - `"api/standard".FaaSName` -> `FaaSNameKey`
+ - `"api/standard".FaaSID` -> `FaaSIDKey`
+ - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+ - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+ The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+ If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+ This removes the behavior of changing the debug flag into a set sampling bit.
+ Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided, as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter, and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling to replace any one-off error handlers with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to the root package of the project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows the user to specify a custom HTTP client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase the instance size the CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include a section for compatible libraries and a reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now-unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU- and BSD-based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until there is no data, but respect the max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is the first official beta release, which provides almost complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequently, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify that the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorder the Makefile targets within the `precommit` target so files are generated and the code is built before linting, giving much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics; instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Rename `circle-ci` target to the more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing them. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the open-telemetry go library.
+It contains the api and sdk for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project. + +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.23.0...HEAD +[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 +[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 +[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/CODEOWNERS b/event-exporter/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 000000000..31d336d92 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/community-membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/event-exporter/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 000000000..c9f2bac55 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,645 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +Additionally, there is a `codespell` target that checks for common +typos in the code. It is not run by default, but you can run it +manually with `make codespell`. It will set up a virtual environment +in `venv` and install `codespell` there. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean` then it means that everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files", just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. 
You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company counts as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    needs to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+    PR may be dismissed or resolved or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can take exception as long as it has been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+      status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+      changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the method to satisfy those use cases is
+not.
+
+As such, contributions should provide functionality and behavior that
+conforms to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+```sh
+go install golang.org/x/pkgsite/cmd/pkgsite@latest
+pkgsite
+```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices,
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you plan
+to submit. This check will need to pass for your changes to be able to be
+merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation needs to be included as
+to the reasons why.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
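+
+As orientation before the details, here is a minimal, self-contained sketch of
+where this design lands. All names in it (`Client`, `NewClient`, `WithTimeout`)
+are illustrative only, not APIs of this project:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// config holds the options; it stays unexported, per the guidance below.
+type config struct {
+	timeout time.Duration
+}
+
+// Option applies a change to a config.
+type Option interface {
+	apply(config) config
+}
+
+type timeoutOption time.Duration
+
+func (o timeoutOption) apply(c config) config {
+	c.timeout = time.Duration(o)
+	return c
+}
+
+// WithTimeout is an illustrative exported option constructor.
+func WithTimeout(d time.Duration) Option { return timeoutOption(d) }
+
+// Client is an illustrative complex type built from a config.
+type Client struct{ timeout time.Duration }
+
+// NewClient sets defaults, then applies each option in order.
+func NewClient(options ...Option) Client {
+	c := config{timeout: 10 * time.Second} // default value
+	for _, opt := range options {
+		c = opt.apply(c)
+	}
+	return Client{timeout: c.timeout}
+}
+
+func main() {
+	client := NewClient(WithTimeout(5 * time.Second))
+	fmt.Println(client.timeout) // prints 5s
+}
+```
+
+The sections below break this pattern into its parts and describe the
+conventions this project applies to each.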
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with the
+specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package boundaries.
+That is, a `config` from one package should not be directly used by another. The
+one exception is the API packages. The configs from the base API, e.g.
+`go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK; it is therefore expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps setting any defaults and looping over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this can return an
+error as well that is expected to be handled by the instantiation function
+or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user-configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or, in the
+special case of boolean options, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have the Bool option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct`s that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is to self-document by naming method parameters. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
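+
+For illustration, a sketch of how such a warning sits in a doc comment; the
+`Exporter` interface here is hypothetical, reusing the shape from the next
+section:
+
+```go
+// Exporter exports telemetry.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Exporter interface {
+	Export()
+}
+```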
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This allows the SDK released one version before
+the API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API, the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems arose with libraries that upgraded to v2 while an application did not;
+those libraries would then produce no telemetry.
+
+More detail on the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed, it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add, or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new interface that is a super-set of an `Exporter` can be
+created.
+
+```go
+type ClosingExporter interface {
+	Exporter
+	Close()
+}
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set interface,
+duplicating the pattern. For this reason, the simple targeted interface that
+defines the specific functionality should be preferred.
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions.
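+
+A minimal sketch of such a test, assuming a hypothetical `Store` type with an
+`Add` method whose concurrency safety is under test:
+
+```go
+func TestStoreConcurrentSafe(t *testing.T) {
+	store := NewStore() // hypothetical constructor for the type under test
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func(n int) {
+			defer wg.Done()
+			store.Add(n)
+		}(i)
+	}
+	// Waiting for every goroutine ensures none leak from the test; run with
+	// `go test -race` to detect data races.
+	wg.Wait()
+}
+```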
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules: a user could upgrade the parent without the child,
+and if the internal package API has changed, the upgrade will fail[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the
+contexts that are passed when recording a value (e.g. starting a span,
+recording a measurement, emitting a log). Recording methods should not return
+an error describing the cancellation state of the context when they complete,
+nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout
+mechanism for the method. In that case the context cancellation can be used
+for the timeout, with the restriction that this behavior is documented for
+the method. Otherwise, timeouts are expected to be handled by the user
+calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate
+`Shutdown` method of a provider. It is assumed the context passed from a user
+is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting
+telemetry, force flushing telemetry, shutting down a signal provider) the
+context cancellation should be honored. This means all work done on behalf of
+the user provided context should be canceled.
+
+## Approvers and Maintainers
+
+### Approvers
+
+- [Evan Torrie](https://github.com/evantorrie), Verizon Media
+- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+
+### Maintainers
+
+- [David Ashpole](https://github.com/dashpole), Google
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
+- [Robert Pająk](https://github.com/pellared), Splunk
+- [Tyler Yahn](https://github.com/MrAlias), Splunk
+
+### Emeritus
+
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
+- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/LICENSE b/event-exporter/vendor/go.opentelemetry.io/otel/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/Makefile b/event-exporter/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..35fc18996 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,318 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default +ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. 
+PIP := $(PYTOOLS)/pip
+
+# The directory in the docker image where the current directory is mounted.
+WORKDIR := /workdir
+
+# The python image to use for the virtual environment.
+PYTHONIMAGE := python:3.11.3-slim-bullseye
+
+# Run the python image with the current directory mounted.
+DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+
+# Create a virtual environment for Python tools.
+$(PYTOOLS):
+# The `--upgrade` flag is needed to ensure that the virtual environment is
+# created with the latest pip version.
+	@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
+
+# Install python packages into the virtual environment.
+$(PYTOOLS)/%: | $(PYTOOLS)
+	@$(DOCKERPY) $(PIP) install -r requirements.txt
+
+CODESPELL = $(PYTOOLS)/codespell
+$(CODESPELL): PACKAGE=codespell
+
+# Generate
+
+.PHONY: generate
+generate: go-generate vanity-import-fix
+
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: | $(STRINGER) $(GOTMPL)
+	@echo "$(GO) generate $(DIR)/..." \
+	&& cd $(DIR) \
+	&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: | $(PORTO)
+	@$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: | $(CROSSLINK)
+	$(CROSSLINK) work --root=$(shell pwd)
+
+# Build
+
+.PHONY: build
+
+build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+	@echo "$(GO) build $(DIR)/..." \
+	&& cd $(DIR) \
+	&& $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+	@echo "$(GO) build tests $(DIR)/..." \
+	&& cd $(DIR) \
+	&& $(GO) list ./... \
+	| grep -v third_party \
+	| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short: ARGS=-short
+test-verbose: ARGS=-v -race
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+	&& cd $(DIR) \
+	&& $(GO) list ./... \
+	| grep -v third_party \
+	| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
+
+COVERAGE_MODE = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: | $(GOCOVMERGE)
+	@set -e; \
+	printf "" > coverage.txt; \
+	for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+	  echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
+	  (cd "$${dir}" && \
+	    $(GO) list ./... \
+	    | grep -v third_party \
+	    | grep -v 'semconv/v.*' \
+	    | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+	  $(GO) tool cover -html=coverage.out -o coverage.html); \
+	done; \
+	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
+BENCHMARK_TARGETS := sdk/trace
+.PHONY: benchmark
+benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+BENCHMARK_FILTER = .
+# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..."
\ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.20 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + +.PHONY: codespell +codespell: | $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%:| $(GORELEASE)
+	@echo "gorelease in $(DIR):" \
+	&& cd $(DIR) \
+	&& $(GORELEASE) \
+	|| echo ""
+
+.PHONY: prerelease
+prerelease: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+.PHONY: lint-markdown
+lint-markdown:
+	docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/README.md b/event-exporter/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 000000000..44e1bfc9b
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,108 @@
+# OpenTelemetry-Go
+
+[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
+[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
+
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     |
+|---------|------------|
+| Traces  | Stable     |
+| Metrics | Stable     |
+| Logs    | Design [1] |
+
+- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
+  No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS      | Go Version | Architecture |
+|---------|------------|--------------|
+| Ubuntu  | 1.21       | amd64        |
+| Ubuntu  | 1.20       | amd64        |
+| Ubuntu  | 1.21       | 386          |
+| Ubuntu  | 1.20       | 386          |
+| MacOS   | 1.21       | amd64        |
+| MacOS   | 1.20       | amd64        |
+| Windows | 1.21       | amd64        |
+| Windows | 1.20       | amd64        |
+| Windows | 1.21       | 386          |
+| Windows | 1.20       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
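+
+As a minimal sketch of what direct instrumentation looks like (the tracer and
+span names here are illustrative):
+
+```go
+tracer := otel.Tracer("example.com/mylib")
+ctx, span := tracer.Start(ctx, "operation")
+defer span.End()
+```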
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |    ✓    |        |
+| [stdout](./exporters/stdout/)         |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |         |   ✓    |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/RELEASING.md b/event-exporter/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..d2691d0bd
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,139 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-as-needed>
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/VERSIONING.md b/event-exporter/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..412f1e362
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver
+    2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+    * New methods may be added to exported API interfaces. All exported
+      interfaces that fall within this exception will include the following
+      paragraph in their public documentation.
+
+      > Warning: methods may be added to this interface in minor releases.
+
+    * If a module is version `v2` or higher, the major version of the module
+      must be included as a `/vN` at the end of the module paths used in
+      `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+      go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+      (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+      paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
+      `@v2.0.1` in that example. One way to think about it is that the module
+      name now includes the `/v2`, so include `/v2` whenever you are using the
+      module name).
+    * If a module is version `v0` or `v1`, do not include the major version in
+      either the module path or the import path.
+  * Modules will be used to encapsulate signals and components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API will be versioned
+    with a major version greater than `v0`.
+  * The decision to make a module stable will be made on a case-by-case
+    basis by the maintainers of this project.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * All stable modules that use the same major version number will use the
+    same entire version number.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module has not been changed, but rather so
+      that it will remain at the same version as other stable modules that
+      did undergo change.
+    * When an experimental module becomes stable a new stable module version
+      will be released and will include this now stable module. The new
+      stable module version will be an increment of the minor version number
+      and will be applied to all existing stable modules as well as the newly
+      stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules of the same major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead
+      the only change that will have been included is to have updated that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to the point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a
+stable release. The `otel/metric` and `otel/sdk/metric` modules are still
+under active development and the `otel` module depends on both `otel/trace`
+and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package.
These issues are resolved with some minor, but backwards incompatible,
+changes and are released as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 000000000..dafe7424d
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package attribute provides key and value attributes.
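+//
+// For example, attribute.String("user.name", "alice") constructs a KeyValue
+// with a STRING value that can be attached to telemetry.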
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..fe2bc5766
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size that most attribute encodings will not allocate new
+		// memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder. Preferably in init() or in var
+// definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
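+//
+// For example, the attributes `key1=val1` and `key2=val,2` are encoded as
+// `key1=val1,key2=val\,2`.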
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultAttrEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedAttribute()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			_, _ = buf.WriteRune(escapeChar)
+		}
+		_, _ = buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+	return id.value != 0
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/filter.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 000000000..638c213d5
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+	if len(keys) <= 0 {
+		return func(kv KeyValue) bool { return false }
+	}
+
+	allowed := make(map[Key]struct{})
+	for _, k := range keys {
+		allowed[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := allowed[kv.Key]
+		return ok
+	}
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
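+//
+// For example, NewDenyKeysFilter("password") returns a Filter that reports
+// false for any attribute whose key is "password" and true otherwise.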
+func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..841b271fb --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. 
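For review context, a short sketch of how the allow filter above behaves (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		allow := attribute.NewAllowKeysFilter("http.method", "http.route")
		fmt.Println(allow(attribute.String("http.method", "GET"))) // true
		fmt.Println(allow(attribute.String("user.id", "42")))      // false
	}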
+func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/key.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..0656a04e4 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). 
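A sketch of the merge semantics implemented above, where the first set wins on duplicate keys (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		s1 := attribute.NewSet(attribute.String("a", "1"), attribute.String("b", "2"))
		s2 := attribute.NewSet(attribute.String("b", "3"), attribute.String("c", "4"))
		mi := attribute.NewMergeIterator(&s1, &s2)
		for mi.Next() {
			kv := mi.Attribute()
			fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit()) // a=1, b=2, c=4
		}
	}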
+func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. +func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/kv.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 000000000..1ddf3ce05 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. 
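The Key methods above and the package-level helpers in kv.go are interchangeable; a quick sketch (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		k := attribute.Key("http.method")
		fmt.Println(k.String("GET") == attribute.String("http.method", "GET")) // true
	}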
+func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/set.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 000000000..fb6da5145 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" + "sync" +) + +type ( + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. + // + // This type supports the Equivalent method of comparison using values of + // type Distinct. + Set struct { + equivalent Distinct + } + + // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. + Distinct struct { + iface interface{} + } + + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). + Sortable []KeyValue +) + +var ( + // keyValueType is used in computeDistinctReflect. + keyValueType = reflect.TypeOf(KeyValue{}) + + // emptySet is returned for empty attribute sets. 
+ emptySet = &Set{ + equivalent: Distinct{ + iface: [0]KeyValue{}, + }, + } + + // sortables is a pool of Sortables used to create Sets with a user does + // not provide one. + sortables = sync.Pool{ + New: func() interface{} { return new(Sortable) }, + } +) + +// EmptySet returns a reference to a Set with no elements. +// +// This is a convenience provided for optimized calling utility. +func EmptySet() *Set { + return emptySet +} + +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid Set. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of attributes in this set. +func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflectValue().Len() +} + +// Get returns the KeyValue at ordered position idx in this set. +func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. 
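Because Distinct is comparable, two Sets built from the same elements (in any order) can be recognized by map lookup; a sketch (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		s1 := attribute.NewSet(attribute.String("k", "v"), attribute.Int("n", 1))
		s2 := attribute.NewSet(attribute.Int("n", 1), attribute.String("k", "v"))
		seen := map[attribute.Distinct]bool{s1.Equivalent(): true}
		fmt.Println(seen[s2.Equivalent()]) // true: same elements, same Distinct
	}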
+func NewSet(kvs ...KeyValue) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + srt := sortables.Get().(*Sortable) + s, _ := NewSetWithSortableFiltered(kvs, srt, nil) + sortables.Put(srt) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + srt := sortables.Get().(*Sortable) + s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) + sortables.Put(srt) + return s, filtered +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + *tmp = kvs + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + sort.Stable(tmp) + + *tmp = nil + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. 
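The de-duplication contract described above, in miniature (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		s := attribute.NewSet(attribute.Int("k", 1), attribute.Int("k", 2))
		v, _ := s.Value("k")
		fmt.Println(s.Len(), v.AsInt64()) // 1 2: duplicates dropped, last value wins
	}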
+func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. 
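A sketch of filtering an existing Set with the deny filter from filter.go (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
	)

	func main() {
		s := attribute.NewSet(
			attribute.String("user", "alice"),
			attribute.String("password", "hunter2"),
		)
		kept, dropped := s.Filter(attribute.NewDenyKeysFilter("password"))
		// kept encodes to user=alice; dropped holds the one excluded pair.
		fmt.Println(kept.Encoded(attribute.DefaultEncoder()), len(dropped))
	}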
+func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/attribute/value.go b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..cb21dd5c0 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,270 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. 
+ INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. 
Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. +func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 000000000..7d27cf77d --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,744 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. 
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateKey(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// reporting whether the value is set; this distinguishes a Property whose
+// value is the empty string from one whose value is not set.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
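A sketch of the percent-decoding behavior of NewKeyValueProperty above (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/baggage"
	)

	func main() {
		p, err := baggage.NewKeyValueProperty("flavor", "apple%20pie")
		if err != nil {
			panic(err)
		}
		v, ok := p.Value()
		fmt.Printf("%q %v\n", v, ok) // "apple pie" true
	}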
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead
+// that does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateValue(value) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var props properties
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key := strings.TrimSpace(k)
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	val := strings.TrimSpace(v)
+	if !validateValue(val) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(val)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+	}
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification, returning an
+// error otherwise. A key must be an ASCII string.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !validateKey(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string. A value is restricted to be
+	// US-ASCII characters excluding CTLs, whitespace,
+	// DQUOTE, comma, semicolon, and backslash.
+	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+	if len(m.properties) > 0 {
+		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+	}
+	return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if it results in a
+// Baggage exceeding limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplication.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. +func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members does not have significance. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. +func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy the Baggage with the member included. If the +// baggage contains a Member with the same key the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. 
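A round-trip sketch of Parse, SetMember, and Member lookup (illustrative, not part of the vendored diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/baggage"
	)

	func main() {
		b, _ := baggage.Parse("user=alice,region=us-east-1")
		m, _ := baggage.NewMemberRaw("tenant", "acme")
		b, _ = b.SetMember(m)
		fmt.Println(b.Member("tenant").Value(), b.Len()) // acme 3
	}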
+func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String()) + } + return strings.Join(members, listDelimiter) +} + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " = value ". + keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a precent-encoded value. 
+ value, err := url.PathUnescape(s[valueStart:valueEnd]) + if err != nil { + return + } + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return (c >= 0x23 && c <= 0x27) || + (c >= 0x30 && c <= 0x39) || + (c >= 0x41 && c <= 0x5a) || + (c >= 0x5e && c <= 0x7a) || + c == 0x21 || + c == 0x2a || + c == 0x2b || + c == 0x2d || + c == 0x2e || + c == 0x7c || + c == 0x7e +} + +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +func validateValueChar(c int32) bool { + return c == 0x21 || + (c >= 0x23 && c <= 0x2b) || + (c >= 0x2d && c <= 0x3a) || + (c >= 0x3c && c <= 0x5b) || + (c >= 0x5d && c <= 0x7e) +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. +func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/baggage/context.go b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 000000000..24b34b756 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. 
+	return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/baggage/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 000000000..4545100df
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/codes/codes.go b/event-exporter/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..587ebae4e
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
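A sketch of the JSON round-trip the Code methods below provide (illustrative, not part of the vendored diff):

	package main

	import (
		"encoding/json"
		"fmt"

		"go.opentelemetry.io/otel/codes"
	)

	func main() {
		var c codes.Code
		if err := json.Unmarshal([]byte(`"Error"`), &c); err != nil {
			panic(err)
		}
		fmt.Println(c == codes.Error, c.String()) // true Error
	}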
+type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/codes/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..4e328fbb4 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..36d7c24e8 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/error_handler.go b/event-exporter/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..72fad8541 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. 
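+//
+// For example (an illustrative sketch; the log prefix is assumed):
+//
+//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+//		log.Printf("opentelemetry: %v", err)
+//	}))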
+func (f ErrorHandlerFunc) Handle(err error) {
+	f(err)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/event-exporter/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
new file mode 100644
index 000000000..9a58fb1d3
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+top_dir='.'
+if [[ $# -gt 0 ]]; then
+	top_dir="${1}"
+fi
+
+p=$(pwd)
+mod_dirs=()
+
+# Note `mapfile` does not exist in older bash versions:
+# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
+
+while IFS= read -r line; do
+	mod_dirs+=("$line")
+done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+for mod_dir in "${mod_dirs[@]}"; do
+	cd "${mod_dir}"
+
+	while IFS= read -r line; do
+		echo ".${line#${p}}"
+	done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
+	cd "${p}"
+done
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/handler.go b/event-exporter/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 000000000..4115fe3bb
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	// Compile-time check global.ErrDelegator implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrDelegator)(nil)
+	// Compile-time check global.ErrLogger implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrLogger)(nil)
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.Handle(err) }
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 000000000..622c3ee3f
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for commonly used
+attribute-processing logic.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
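+// For example (illustrative), AsInt64Slice([2]int64{1, 2}) returns
+// []int64{1, 2}, while any non-array input returns nil.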
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero string
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 000000000..b96e5408e
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. Were it not for this
+need, this package would not exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not; therefore, this is represented as a map.
+type List map[string]Item
+
+// Item is the value and metadata properties part of a list-member.
+type Item struct {
+	Value      string
+	Properties []Property
+}
+
+// Property is a metadata entry for a list-member.
+type Property struct {
+	Key, Value string
+
+	// HasValue indicates whether a zero-value Value means the property has
+	// no value at all or whether its value is genuinely the zero-value (an
+	// empty string).
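+	// For example (per the W3C baggage syntax), the property `p` in
+	// `key=v;p` has HasValue == false, while `p=` yields HasValue == true
+	// with an empty Value.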
+ HasValue bool +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..4469700d9 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. 
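+// If ctx carries no baggage state, a nil List is returned.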
+func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/gen.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..f532f07e9 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 000000000..5e9b83047 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+)
+
+var (
+	// GlobalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (via `SetErrorHandler`),
+	// all calls to `Handle` will be delegated to the registered
+	// ErrorHandler.
+	GlobalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*ErrDelegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+	Handle(error)
+}
+
+// ErrDelegator is an ErrorHandler that forwards calls to an atomically
+// swappable delegate.
+type ErrDelegator struct {
+	delegate atomic.Pointer[ErrorHandler]
+}
+
+func (d *ErrDelegator) Handle(err error) {
+	d.getDelegate().Handle(err)
+}
+
+func (d *ErrDelegator) getDelegate() ErrorHandler {
+	return *d.delegate.Load()
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
+	d.delegate.Store(&eh)
+}
+
+func defaultErrorHandler() *ErrDelegator {
+	d := &ErrDelegator{}
+	d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+	return d
+}
+
+// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
+type ErrLogger struct {
+	l *log.Logger
+}
+
+// Handle logs err if no delegate is set, otherwise it is delegated.
+func (h *ErrLogger) Handle(err error) {
+	h.l.Print(err)
+}
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+	return GlobalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	GlobalErrorHandler.setDelegate(h)
+}
+
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) {
+	GetErrorHandler().Handle(err)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
new file mode 100644
index 000000000..ebb13c206
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
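+	// If the configured SDK rejects the instrument, report the error through
+	// the global handler and keep the no-op placeholder in place.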
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) 
+	}
+}
+
+type siCounter struct {
+	embedded.Int64Counter
+
+	name string
+	opts []metric.Int64CounterOption
+
+	delegate atomic.Value // metric.Int64Counter
+}
+
+var _ metric.Int64Counter = (*siCounter)(nil)
+
+func (i *siCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Counter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Counter).Add(ctx, x, opts...)
+	}
+}
+
+type siUpDownCounter struct {
+	embedded.Int64UpDownCounter
+
+	name string
+	opts []metric.Int64UpDownCounterOption
+
+	delegate atomic.Value // metric.Int64UpDownCounter
+}
+
+var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
+func (i *siUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
+	}
+}
+
+type siHistogram struct {
+	embedded.Int64Histogram
+
+	name string
+	opts []metric.Int64HistogramOption
+
+	delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Histogram(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+	}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 000000000..c6f305a2b
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to
+// provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger atomic.Pointer[logr.Logger]
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +func getLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + getLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + getLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + getLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + getLogger().V(1).Info(msg, keysAndValues...) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..0097db478 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. 
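+	// Meters are memoized by (name, instrumentation version): repeated
+	// requests for the same pair (for example, otel.Meter("example.io/pkg"),
+	// where the name is illustrative) share one placeholder until an SDK is
+	// installed.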
+ + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) 
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	reg := &registration{instruments: insts, function: f}
+	e := m.registry.PushBack(reg)
+	reg.unreg = func() error {
+		m.mtx.Lock()
+		_ = m.registry.Remove(e)
+		m.mtx.Unlock()
+		return nil
+	}
+	return reg, nil
+}
+
+// unwrapInstruments unwraps any placeholder instruments that implement the
+// unwrapper interface defined in instruments.go.
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+	out := make([]metric.Observable, 0, len(instruments))
+
+	for _, inst := range instruments {
+		if in, ok := inst.(unwrapper); ok {
+			out = append(out, in.Unwrap())
+		} else {
+			out = append(out, inst)
+		}
+	}
+
+	return out
+}
+
+type registration struct {
+	embedded.Registration
+
+	instruments []metric.Observable
+	function    metric.Callback
+
+	unreg   func() error
+	unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	insts := unwrapInstruments(c.instruments)
+
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(c.function, insts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		// reg is not usable on error; keep the existing unregister function.
+		return
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..06bac35c2
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set; otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
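+// The delegate assignment is guarded by p.once, so later calls leave the
+// first delegate in place.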
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/state.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 000000000..7985005bc
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type (
+	tracerProviderHolder struct {
+		tp trace.TracerProvider
+	}
+
+	propagatorsHolder struct {
+		tm propagation.TextMapPropagator
+	}
+
+	meterProviderHolder struct {
+		mp metric.MeterProvider
+	}
+)
+
+var (
+	globalTracer        = defaultTracerValue()
+	globalPropagators   = defaultPropagatorsValue()
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateTraceOnce             sync.Once
+	delegateTextMapPropagatorOnce sync.Once
+	delegateMeterOnce             sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator,
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..3f61ec12a
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK-defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior: it remains a no-op
+Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	embedded.TracerProvider
+
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the
+	// tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	embedded.Tracer
+
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
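Stripped of the vendor plumbing, the lock-and-swap technique this file uses looks roughly like the following hypothetical sketch (greeter is a stand-in interface; assumes the sync package is imported):

    // delegator forwards to its delegate once one is configured and
    // falls back to no-op behavior until then.
    type greeter interface{ Greet() string }

    type delegator struct {
        mtx      sync.Mutex
        delegate greeter
    }

    func (d *delegator) setDelegate(g greeter) {
        d.mtx.Lock()
        defer d.mtx.Unlock()
        d.delegate = g // all future calls forward to g
    }

    func (d *delegator) Greet() string {
        d.mtx.Lock()
        defer d.mtx.Unlock()
        if d.delegate != nil {
            return d.delegate.Greet()
        }
        return "" // no-op default before configuration
    }

The real implementation additionally caches placeholder tracers by (name, version) so they can all be switched when the delegate arrives.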
+func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + embedded.Span + + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 000000000..e07e79400 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. 
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func RawToBool(r uint64) bool {
+	return r != 0
+}
+
+func Int64ToRaw(i int64) uint64 {
+	return uint64(i)
+}
+
+func RawToInt64(r uint64) int64 {
+	return int64(r)
+}
+
+func Float64ToRaw(f float64) uint64 {
+	return math.Float64bits(f)
+}
+
+func RawToFloat64(r uint64) float64 {
+	return math.Float64frombits(r)
+}
+
+func RawPtrToFloat64Ptr(r *uint64) *float64 {
+	return (*float64)(unsafe.Pointer(r))
+}
+
+func RawPtrToInt64Ptr(r *uint64) *int64 {
+	return (*int64)(unsafe.Pointer(r))
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/internal_logging.go b/event-exporter/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 000000000..c4f8acd5d
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally by OpenTelemetry.
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 000000000..f95517195
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, an implementation-defined default name will be used
+// instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
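A sketch of typical use of the global Meter documented above (the meter and instrument names are illustrative, and ctx is assumed to be an existing context.Context):

    meter := otel.Meter("example.com/mylib")
    counter, err := meter.Int64Counter("requests.total",
        metric.WithDescription("Total processed requests."),
        metric.WithUnit("{request}"),
    )
    if err != nil {
        // instrument creation failed; handle or log the error
    }
    counter.Add(ctx, 1)

Because of the delegation described earlier, this is safe to run before an SDK MeterProvider is registered; the counter is recreated against the real provider once one is set.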
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/LICENSE b/event-exporter/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 000000000..072baa8e8
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,271 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+	var config Float64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
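As a usage sketch for the callback option defined further below (names are hypothetical; assumes a metric.Meter in meter, and readHeapUtilization is an assumed helper, not part of this API):

    _, err := meter.Float64ObservableGauge("process.heap.utilization",
        metric.WithDescription("Fraction of the heap in use."),
        metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
            o.Observe(readHeapUtilization())
            return nil
        }),
    )
    if err != nil {
        // handle the error
    }

The callback is invoked once per collection cycle; the option route suits callbacks tied to a single instrument that never need unregistering.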
+type Float64ObservableUpDownCounterOption interface {
+	applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableGauge
+
+	Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback registered for the same instrument.
+//
+// The function needs to be safe to call concurrently.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds the callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..9bd6ebf02
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,269 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
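The option-based registration sketched earlier for the float64 instruments has a counterpart on Meter itself. A hedged sketch using RegisterCallback, which suits callbacks that observe several instruments or need to be unregistered later (assumes a metric.Meter in meter; the instrument name is illustrative; uses runtime.ReadMemStats from the standard library as a monotonic source):

    gcCount, _ := meter.Int64ObservableCounter("runtime.gc.count")
    reg, err := meter.RegisterCallback(
        func(_ context.Context, o metric.Observer) error {
            var ms runtime.MemStats
            runtime.ReadMemStats(&ms)
            o.ObserveInt64(gcCount, int64(ms.NumGC))
            return nil
        },
        gcCount, // a callback must declare the instruments it observes
    )
    if err != nil {
        // handle the error
    }
    defer reg.Unregister()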
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback registered for the same instrument.
+//
+// The function needs to be safe to call concurrently.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds the callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+	return int64CallbackOpt{callback}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/config.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 000000000..778ad2d74
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+	instrumentationVersion string
+	schemaURL              string
+	attrs                  attribute.Set
+
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+	return cfg.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+	return cfg.attrs
+}
+
+// SchemaURL is the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+	return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+	// applyMeter is used to set a MeterOption value of a MeterConfig.
+	applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
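To illustrate how these options compose, a small sketch of building and reading back a MeterConfig (the values are illustrative; assumes the fmt package is imported):

    cfg := metric.NewMeterConfig(
        metric.WithInstrumentationVersion("0.1.0"),
        metric.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
    )
    fmt.Println(cfg.InstrumentationVersion()) // 0.1.0
    fmt.Println(cfg.SchemaURL())              // https://opentelemetry.io/schemas/1.21.0

A MeterProvider implementation would typically call NewMeterConfig with the options it receives in Meter(name, opts...) and read the result through these accessors.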
+func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..54716e13b --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. 
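A hedged sketch of such an inline synchronous measurement (assumes a metric.Meter in meter, a context.Context in ctx, and a hypothetical handle function doing the instrumented work):

    hist, _ := meter.Float64Histogram("request.duration",
        metric.WithDescription("Duration of request handling."),
        metric.WithUnit("s"),
    )
    start := time.Now()
    handle(req)
    hist.Record(ctx, time.Since(start).Seconds())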
+ +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. + +# Measurements + +Measurements are made by recording values and information about the values with +an instrument. How these measurements are recorded depends on the instrument. + +Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are recorded using the instrument methods directly. All +counter instruments have an Add method that is used to measure an increment +value, and all histogram instruments have a Record method to measure a data +point. + +Asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) record measurements within a callback function. The +callback is registered with the Meter which ensures the callback is called once +per collection cycle. A callback can be registered two ways: during the +instrument's creation using an option, or later using the RegisterCallback +method of the [Meter] that created the instrument. + +If the following criteria are met, an option ([WithInt64Callback] or +[WithFloat64Callback]) can be used during the asynchronous instrument's +creation to register a callback ([Int64Callback] or [Float64Callback], +respectively): + + - The measurement process is known when the instrument is created + - Only that instrument will make a measurement within the callback + - The callback never needs to be unregistered + +If the criteria are not met, use the RegisterCallback method of the [Meter] that +created the instrument to register a [Callback]. + +# API Implementations + +This package does not conform to the standard Go versioning policy, all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. 
They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a
+panic, they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..ae0bdbd2e
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,234 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly.
Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). 
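A minimal sketch of the pattern for one of these interfaces (the implementation is hypothetical; the final var _ line is where the compilation failure surfaces once metric.Observer gains a method that embedded.Observer does not provide):

    import (
        "go.opentelemetry.io/otel/metric"
        "go.opentelemetry.io/otel/metric/embedded"
    )

    type observer struct {
        embedded.Observer
    }

    func (observer) ObserveFloat64(_ metric.Float64Observable, _ float64, _ ...metric.ObserveOption) {}
    func (observer) ObserveInt64(_ metric.Int64Observable, _ int64, _ ...metric.ObserveOption)       {}

    // Fails to compile when the metric.Observer interface is extended.
    var _ metric.Observer = observer{}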
+type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). 
+type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). 
+type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/instrument.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..be89cd533 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,357 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) 
Int64ObservableGaugeConfig {
+	c.description = string(o)
+	return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
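+//
+// A hypothetical SDK-side sketch of how such options are consumed (the
+// histogram type and recording logic here are assumptions, not part of this
+// API):
+//
+//	func (h *histogram) Record(ctx context.Context, v float64, opts ...metric.RecordOption) {
+//		cfg := metric.NewRecordConfig(opts) // resolve all passed options
+//		attrs := cfg.Attributes()           // the configured attribute set
+//		_ = attrs                           // record v with attrs ...
+//	}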
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set with which a measurement is made.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/meter.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 000000000..2520bc74a
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,212 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by
+	// an application, nor other applications. To achieve this, the import path
+	// of the instrumentation package is recommended to be used as the name.
+	//
+	// If the name is empty, then an implementation-defined default name will
+	// be used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
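+	//
+	// A hedged usage sketch; the instrument name, description, and unit
+	// below are illustrative assumptions, not values defined by this API:
+	//
+	//	counter, err := meter.Int64Counter("request.count",
+	//		metric.WithDescription("handled requests"),
+	//		metric.WithUnit("{request}"))
+	//	if err != nil {
+	//		// handle instrument creation failure
+	//	}
+	//	counter.Add(ctx, 1)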
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once
+	// per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it must not report measurements for an instrument with
+// the same attributes as another Callback.
+//
+// The function needs to be concurrent safe.
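+//
+// A minimal conforming sketch (meter and obs are assumed to already exist: a
+// Meter and an Int64ObservableCounter; the observed value 42 is illustrative):
+//
+//	cb := func(ctx context.Context, o metric.Observer) error {
+//		o.ObserveInt64(obs, 42)
+//		return nil
+//	}
+//	reg, err := meter.RegisterCallback(cb, obs)
+//	if err == nil {
+//		defer reg.Unregister()
+//	}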
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 000000000..0a4825ae6
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
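+	//
+	// A hedged sketch with a pre-allocated attribute set (the key and values
+	// are illustrative assumptions):
+	//
+	//	set := attribute.NewSet(attribute.String("peer", "db"))
+	//	counter.Add(ctx, 0.5, metric.WithAttributeSet(set))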
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments
+// that record float64 values.
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+	var config Float64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+	applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+	var config Float64HistogramConfig
+	for _, o := range opts {
+		config = o.applyFloat64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+	applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 000000000..56667d32f
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
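+	//
+	// A hedged sketch (the attribute key and value are illustrative
+	// assumptions):
+	//
+	//	counter.Add(ctx, 1, metric.WithAttributes(
+	//		attribute.String("http.route", "/home")))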
+ Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64CounterConfig contains options for synchronous counter instruments that +// record int64 values. +type Int64CounterConfig struct { + description string + unit string +} + +// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts +// applied. +func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { + var config Int64CounterConfig + for _, o := range opts { + config = o.applyInt64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64CounterConfig) Unit() string { + return c.unit +} + +// Int64CounterOption applies options to a [Int64CounterConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64CounterOption. +type Int64CounterOption interface { + applyInt64Counter(Int64CounterConfig) Int64CounterConfig +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64UpDownCounterConfig contains options for synchronous counter +// instruments that record int64 values. +type Int64UpDownCounterConfig struct { + description string + unit string +} + +// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with +// all opts applied. +func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { + var config Int64UpDownCounterConfig + for _, o := range opts { + config = o.applyInt64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. +// See [InstrumentOption] for other options that can be used as an +// Int64UpDownCounterOption. +type Int64UpDownCounterOption interface { + applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Histogram + + // Record adds an additional value to the distribution. 
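+	//
+	// A hedged sketch (the histogram and the recorded latency value are
+	// illustrative assumptions):
+	//
+	//	hist, _ := meter.Int64Histogram("latency.ms")
+	//	hist.Record(ctx, 12)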
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/propagation.go b/event-exporter/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 000000000..d29aaa32c
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..303cdf1cb
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/propagation/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..c119eb285
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..c94438f73
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..63e5d6222
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/).
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject sets the trace context from the Context into the carrier.
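+//
+// A hedged round-trip sketch (hdr is an assumed http.Header, and ctx is
+// assumed to carry a valid SpanContext):
+//
+//	var tc TraceContext
+//	tc.Inject(ctx, HeaderCarrier(hdr))
+//	ctx = tc.Extract(ctx, HeaderCarrier(hdr))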
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow extra data after the flags field, and
+		// permits no flag bits beyond the sampled bit.
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode accepts upper-case characters, which the W3C specification
+	// does not allow, so exclude them explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
+// Fields returns the keys whose values are set with Inject.
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/requirements.txt b/event-exporter/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 000000000..e0a43e138
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.2.6
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 000000000..67d1d4c44
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1209 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+	// if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
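A short usage sketch for the generated helpers in this file (illustrative only, not part of the vendored source): each helper returns an attribute.KeyValue keyed by the spec-defined name, suitable for span.SetAttributes or metric options; the values shown are made-up examples.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.HTTPMethod("GET"),           // key: http.method
		semconv.HTTPStatusCode(200),         // key: http.status_code
		semconv.HTTPRoute("/users/:userID"), // key: http.route
	}
	for _, kv := range attrs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}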
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have the same `event.name`,
+	// yet be unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It represents the transport protocol used. See
+	// note below.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions. It represents the application
+	// layer protocol used. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions. It represents the version
+	// of the application layer protocol used. See note below.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.protocol.version` refers to the version of the protocol used + // and might be different from the protocol client's version. If the HTTP + // client used has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. 
+func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. 
+func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. 
It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. 
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. 
+func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. 
It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. 
It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) 
+ // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. 
+func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go new file mode 100644 index 000000000..359c5a696 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.20.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go new file mode 100644 index 000000000..8ac9350d2 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. 
+ // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. 
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be set to true if
+ // the exception event is recorded at a point where it is known that the
+ // exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span, if that span is ended while the exception is still logically
+ // "in flight". This may be actually "in flight" in some languages (e.g.
+ // if the exception is passed to a Context manager's `__exit__` method in
+ // Python) but will usually be caught at the point of recording the
+ // exception in most languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown whether it will escape the scope of a span. However, it is
+ // trivial to know that an exception will escape, if one checks for an
+ // active exception just before ending the span, as done in the [example
+ // above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 000000000..09ff4dfdb
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
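+ //
+ // A minimal usage sketch (illustrative only, not part of the generated
+ // conventions): checking for an in-flight error just before ending the
+ // span and recording it under this event name, assuming span is a
+ // trace.Span from go.opentelemetry.io/otel/trace:
+ //
+ //	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
+ //		semconv.ExceptionEscaped(true),
+ //	))
+ //	span.End()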
+ ExceptionEventName = "exception" +) diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 000000000..342aede95 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 000000000..a2b906742 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2071 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. 
It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. 
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup, so it may be necessary to set `cloud.resource_id`
+ // as a span attribute instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider. The following well-known definitions MUST be used if you set
+ // this attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function, *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. Cloud regions often
+ // have multiple, isolated locations known as zones to increase
+ // availability. It represents the zone where the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability. It
+// represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. 
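+//
+// A usage sketch (illustrative only, not part of the generated conventions):
+// the ECS attributes above would typically be attached to a resource via the
+// go.opentelemetry.io/otel/sdk/resource package, assuming example values and
+// the package-level SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	)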
+func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. 
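+//
+// A usage sketch (illustrative only, not part of the generated conventions):
+// the variadic helpers below take one value per log group or stream, e.g.:
+//
+//	kv := semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service")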
+func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. 
It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. 
It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. 
It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
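+//
+// A usage sketch (illustrative only, not part of the generated conventions):
+// host attributes are typically combined on a resource via the
+// go.opentelemetry.io/otel/sdk/resource package, assuming example values and
+// the package-level SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.HostArchAMD64,
+//		semconv.HostType("n1-standard-1"),
+//		semconv.HostImageName("CentOS-8-x86_64-1905"),
+//	)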
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. 
It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human-readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human-readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human-readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux-based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux-based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux-based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux-based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux-based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux-based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
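+//
+// A minimal usage sketch, assuming the SDK resource package
+// (go.opentelemetry.io/otel/sdk/resource); the literal values are
+// illustrative only, not prescribed by these conventions:
+//
+//    // Attach the runtime attributes declared below to an SDK Resource.
+//    res := resource.NewWithAttributes(
+//        SchemaURL, // declared in this package's schema.go
+//        ProcessRuntimeName("go"),
+//        ProcessRuntimeVersion("go1.20.2"), // assumed version string
+//    )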
+const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. 
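+//
+// A minimal usage sketch, assuming the SDK resource package
+// (go.opentelemetry.io/otel/sdk/resource); the values are taken from the
+// Examples above and below and are illustrative only:
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        ServiceName("shoppingcart"),
+//        ServiceNamespace("Shop"),
+//        ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//    )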
+const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. 
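+//
+// A minimal usage sketch with the helpers and enum values declared below;
+// the version string is illustrative only:
+//
+//    attrs := []attribute.KeyValue{
+//        TelemetrySDKName("opentelemetry"),
+//        TelemetrySDKLanguageGo,
+//        TelemetrySDKVersion("1.2.3"), // assumed SDK version
+//    }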
+const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. 
It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. 
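+//
+// A minimal sketch of the preferred, non-deprecated mapping using the
+// `otel.scope.*` helpers above (values taken from the Examples and
+// illustrative only):
+//
+//    attrs := []attribute.KeyValue{
+//        OTelScopeName("io.opentelemetry.contrib.mongodb"),
+//        OTelScopeVersion("1.0.0"),
+//    }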
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It is deprecated; use the
+ // `otel.scope.name` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It is deprecated; use the
+ // `otel.scope.version` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 000000000..e449e5c3b
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 000000000..851774148
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2610 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions.
It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + 
// Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to.
This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references Cassandra
+ // rather than SQL. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions.
It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. 
It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // Cosmos DB operation type. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // Cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the Cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // Cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions.
It represents the RU + // consumed for that operation. + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions.
It represents the RU +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that the corresponding incoming invocation would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 this corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 this is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 this +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start).
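+//
+// Illustrative sketch (not part of the upstream file): marking the first
+// invocation handled by a fresh function instance, assuming span is the
+// incoming invocation's trace.Span:
+//
+//	span.SetAttributes(semconv.FaaSColdstart(true))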
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
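+//
+// Illustrative sketch (not part of the upstream file): recording end-user
+// attributes extracted from a hypothetical decoded access token, using the
+// example values documented above:
+//
+//	span.SetAttributes(
+//		semconv.EnduserID("username"),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope("read:message, write:files"),
+//	)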
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow reporting this unit of code and therefore provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // an HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/users/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr` is available, even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in an +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
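+//
+// Illustrative sketch (not part of the upstream file): metricsJSON is a
+// hypothetical string holding the JSON-serialized ItemCollectionMetrics
+// field of an SDK response:
+//
+//	span.SetAttributes(semconv.AWSDynamoDBItemCollectionMetrics(metricsJSON))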
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
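+//
+// Illustrative sketch (not part of the upstream file): out is a hypothetical
+// ListTables response whose TableNames field carries the returned names:
+//
+//	span.SetAttributes(semconv.AWSDynamoDBTableCount(len(out.TableNames)))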
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
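+//
+// Illustrative sketch (not part of the upstream file): recording both Scan
+// counters, using the example values documented above:
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBCount(10),
+//		semconv.AWSDynamoDBScannedCount(50),
+//	)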
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
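Under the same assumptions as the DynamoDB sketch earlier (tracer wiring and `semconv` import path), the GraphQL helpers above compose the same way; enum members such as `GraphqlOperationTypeQuery` are already `attribute.KeyValue` values and can be passed directly:

	func annotateGraphQLQuery(ctx context.Context) {
		tracer := otel.Tracer("example/graphql") // assumed instrumentation name
		_, span := tracer.Start(ctx, "query findBookByID")
		defer span.End()

		span.SetAttributes(
			semconv.GraphqlOperationTypeQuery, // enum member
			semconv.GraphqlOperationName("findBookByID"),
			semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
		)
	}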
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
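A sketch of the general messaging attributes above on a batch-processing span, under the same assumed wiring; `messaging.batch.message_count` is set only because this span covers a batch:

	func annotateBatchProcess(ctx context.Context, batchSize int) {
		tracer := otel.Tracer("example/messaging") // assumed instrumentation name
		_, span := tracer.Start(ctx, "process")
		defer span.End()

		span.SetAttributes(
			semconv.MessagingSystem("kafka"),
			semconv.MessagingOperationProcess, // enum member
			semconv.MessagingBatchMessageCount(batchSize),
		)
	}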
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, which must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, which must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions.
It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
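The RPC attributes above follow the same pattern; a sketch for a gRPC client span under the same assumed wiring, with illustrative service and method names:

	func annotateGRPCCall(ctx context.Context) {
		tracer := otel.Tracer("example/rpc") // assumed instrumentation name
		_, span := tracer.Start(ctx, "myservice.EchoService/exampleMethod")
		defer span.End()

		span.SetAttributes(
			semconv.RPCSystemGRPC, // enum member for rpc.system
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("exampleMethod"),
			semconv.RPCGRPCStatusCodeOk, // numeric gRPC status 0
		)
	}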
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 000000000..caf7249de --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. 
+// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/LICENSE b/event-exporter/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/config.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..3aadc66cf --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. 
+type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. 
+type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. 
Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/context.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 000000000..76f9a083c --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx.
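A sketch combining the tracer and span options defined in config.go above; the tracer name, the attribute, and the explicit end timestamp are illustrative assumptions:

	package example

	import (
		"context"
		"time"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/attribute"
		"go.opentelemetry.io/otel/trace"
	)

	func tracedOperation(ctx context.Context) {
		tracer := otel.Tracer("example/options", // assumed instrumentation name
			trace.WithInstrumentationVersion("0.1.0"))

		ctx, span := tracer.Start(ctx, "operation",
			trace.WithSpanKind(trace.SpanKindClient),
			trace.WithAttributes(attribute.String("peer.service", "backend")), // illustrative
		)
		_ = ctx // the operation itself would propagate ctx
		span.End(trace.WithTimestamp(time.Now())) // WithTimestamp is usable at start, end, and events
	}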
+// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/doc.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..440f3d756 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. 
If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 000000000..898db5a75 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..88fcb8161 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/noop.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..c125491ca --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// NewNoopTracerProvider returns an implementation of TracerProvider that
+// performs no operations. The Tracer and Spans created from the returned
+// TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
+func NewNoopTracerProvider() TracerProvider {
+	return noopTracerProvider{}
+}
+
+type noopTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns a noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+	return noopTracer{}
+}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{ embedded.Tracer }
+
+var _ Tracer = noopTracer{}
+
+// Start carries forward a non-recording Span, if one is present in the
+// context; otherwise it creates a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+	span := SpanFromContext(ctx)
+	if _, ok := span.(nonRecordingSpan); !ok {
+		// span is likely already a noopSpan, but let's be sure
+		span = noopSpan{}
+	}
+	return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{ embedded.Span }
+
+var _ Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+// IsRecording always returns false.
+func (noopSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (noopSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(error, ...EventOption) {}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(string, ...EventOption) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/trace.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 000000000..26a4b2260
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,577 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+const (
+	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+	// with the sampling bit set means the span is sampled.
+	FlagsSampled = TraceFlags(0x01)
+
+	errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+	errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32"
+	errNilTraceID           errorConst = "trace-id can't be all zero"
+
+	errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16"
+	errNilSpanID           errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+	return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var (
+	nilTraceID TraceID
+	_          json.Marshaler = nilTraceID
+)
+
+// IsValid checks whether the trace TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+	return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+	return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var (
+	nilSpanID SpanID
+	_         json.Marshaler = nilSpanID
+)
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+	return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+	return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+	t := TraceID{}
+	if len(h) != 32 {
+		return t, errInvalidTraceIDLength
+	}
+
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+
+	if !t.IsValid() {
+		return t, errNilTraceID
+	}
+	return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the W3C trace-context specification.
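+// A valid span-id is a 16-character lowercase hex string that is not all
+// zeros, for example "00f067aa0ba902b7".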
+// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. 
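+//
+// SpanContext values are immutable, so the receiver is never modified; for
+// example, derived := sc.WithTraceID(tid) returns a copy of sc carrying tid
+// while sc itself is unchanged.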
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. 
+	AddEvent(name string, options ...EventOption)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+//    associated with one or more traces/spans. Since there can only be one
+//    parent SpanContext, a Link is used to keep a reference to the
+//    SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+//    public endpoint should be considered untrusted. In such a case, a new
+//    trace with its own identity and sampling decision needs to be created,
+//    but this new trace needs to be related to the original trace in some
+//    form. A Link is used to keep a reference to the original SpanContext and
+//    track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
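+	//
+	// A typical use (illustrative; any instrumented operation works the
+	// same way):
+	//
+	//	ctx, span := tracer.Start(ctx, "operationName")
+	//	defer span.End()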
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/event-exporter/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..db936ba5b --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,331 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+const (
+	maxListMembers = 32
+
+	listDelimiters  = ","
+	memberDelimiter = "="
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E)),
+// i.e. (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+	return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E).
+func checkValueLast(v byte) bool {
+	return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+//	value    = (0*255(chr)) nblk-chr
+//	nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+//	chr      = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+	n := len(val)
+	if n == 0 || n > 256 {
+		return false
+	}
+	for i := 0; i < n-1; i++ {
+		if !checkValueChar(val[i]) {
+			return false
+		}
+	}
+	return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+	// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+	for _, v := range key {
+		if isAlphaNum(byte(v)) {
+			continue
+		}
+		switch v {
+		case '_', '-', '*', '/':
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// according to
+//
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id  = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the maximum length of the remaining part: 255 for a simple-key
+// and 13 for a system-id.
+func checkKeyPart(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	first := key[0] // key's first char
+	ret := len(key[1:]) <= n
+	ret = ret && first >= 'a' && first <= 'z'
+	return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+	if c >= 'a' && c <= 'z' {
+		return true
+	}
+	return c >= '0' && c <= '9'
+}
+
+// according to
+//
+//	tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the maximum length of the remaining part: exactly 240.
+func checkKeyTenant(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+//	key              = simple-key / multi-tenant-key
+//	simple-key       = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	multi-tenant-key = tenant-id "@" system-id
+//	tenant-id        = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id        = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	lcalpha          = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
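+//
+// For example, "congo" and "tenant1@system" are valid keys, while "1tenant"
+// is not: a simple-key (one with no "@") must begin with a lowercase letter.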
+func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + +func newMember(key, value string) (member, error) { + if !checkKey(key) { + return member{}, errInvalidKey + } + if !checkValue(value) { + return member{}, errInvalidValue + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return m.Key + "=" + m.Value +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. 
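+//
+// For example, a TraceState holding list-members k1=v1 and k2=v2 encodes as
+// "k1=v1,k2=v2".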
+func (ts TraceState) String() string {
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
+	}
+	cTS.list[0] = m
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], ts.list[0:found])
+	if found < n {
+		copy(cTS.list[1+found:], ts.list[found+1:])
+	}
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/verify_examples.sh b/event-exporter/vendor/go.opentelemetry.io/otel/verify_examples.sh
new file mode 100644
index 000000000..dbb61a422
--- /dev/null
+++ b/event-exporter/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/version.go b/event-exporter/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 000000000..38ba951ed --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. 
+func Version() string { + return "1.23.0" +} diff --git a/event-exporter/vendor/go.opentelemetry.io/otel/versions.yaml b/event-exporter/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..028393f07 --- /dev/null +++ b/event-exporter/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,52 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.23.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.45.1 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/prometheus + experimental-schema: + version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/event-exporter/vendor/golang.org/x/crypto/LICENSE b/event-exporter/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/event-exporter/vendor/golang.org/x/crypto/PATENTS b/event-exporter/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go new file mode 100644 index 000000000..661ea132e --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s new file mode 100644 index 000000000..7dd2638e8 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -0,0 +1,307 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_generic.go new file mode 100644 index 000000000..93eb5ae6d --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -0,0 +1,398 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms +// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/internal/alias" +) + +const ( + // KeySize is the size of the key used by this cipher, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // cipher, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20 variant of + // this cipher, in bytes. + NonceSizeX = 24 +) + +// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter + // (incremented after each block), and 3 of nonce. + key [8]uint32 + counter uint32 + nonce [3]uint32 + + // The last len bytes of buf are leftover key stream bytes from the previous + // XORKeyStream invocation. The size of buf depends on how many blocks are + // computed at a time by xorKeyStreamBlocks. + buf [bufSize]byte + len int + + // overflow is set when the counter overflowed, no more blocks can be + // generated, and the next XORKeyStream call should panic. + overflow bool + + // The counter-independent results of the first round are cached after they + // are computed the first time. 
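+	// The p* fields below hold those three counter-independent quarterRound
+	// outputs (see xorKeyStreamBlocksGeneric).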
+ precompDone bool + p1, p5, p9, p13 uint32 + p2, p6, p10, p14 uint32 + p3, p7, p11, p15 uint32 +} + +var _ cipher.Stream = (*Cipher)(nil) + +// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given +// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, +// the XChaCha20 construction will be used. It returns an error if key or nonce +// have any other length. +// +// Note that ChaCha20, like all stream ciphers, is not authenticated and allows +// attackers to silently tamper with the plaintext. For this reason, it is more +// appropriate as a building block than as a standalone encryption mechanism. +// Instead, consider using package golang.org/x/crypto/chacha20poly1305. +func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { + // This function is split into a wrapper so that the Cipher allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + c := &Cipher{} + return newUnauthenticatedCipher(c, key, nonce) +} + +func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong key size") + } + if len(nonce) == NonceSizeX { + // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a + // derived key, allowing it to operate on a nonce of 24 bytes. See + // draft-irtf-cfrg-xchacha-01, Section 2.3. + key, _ = HChaCha20(key, nonce[0:16]) + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + nonce = cNonce + } else if len(nonce) != NonceSize { + return nil, errors.New("chacha20: wrong nonce size") + } + + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint + c.key = [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + } + c.nonce = [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + } + return c, nil +} + +// The constant first 4 words of the ChaCha20 state. +const ( + j0 uint32 = 0x61707865 // expa + j1 uint32 = 0x3320646e // nd 3 + j2 uint32 = 0x79622d32 // 2-by + j3 uint32 = 0x6b206574 // te k +) + +const blockSize = 64 + +// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. +// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 +// words each round, in columnar or diagonal groups of 4 at a time. +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = bits.RotateLeft32(d, 16) + c += d + b ^= c + b = bits.RotateLeft32(b, 12) + a += b + d ^= a + d = bits.RotateLeft32(d, 8) + c += d + b ^= c + b = bits.RotateLeft32(b, 7) + return a, b, c, d +} + +// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will +// behave as if (64 * counter) bytes had been encrypted so far. +// +// To prevent accidental counter reuse, SetCounter panics if counter is less +// than the current value. +// +// Note that the execution time of XORKeyStream is not independent of the +// counter value. +func (s *Cipher) SetCounter(counter uint32) { + // Internally, s may buffer multiple blocks, which complicates this + // implementation slightly. 
When checking whether the counter has rolled + // back, we must use both s.counter and s.len to determine how many blocks + // we have already output. + outputCounter := s.counter - uint32(s.len)/blockSize + if s.overflow || counter < outputCounter { + panic("chacha20: SetCounter attempted to rollback counter") + } + + // In the general case, we set the new counter value and reset s.len to 0, + // causing the next call to XORKeyStream to refill the buffer. However, if + // we're advancing within the existing buffer, we can save work by simply + // setting s.len. + if counter < s.counter { + s.len = int(s.counter-counter) * blockSize + } else { + s.counter = counter + s.len = 0 + } +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(src) == 0 { + return + } + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + dst = dst[:len(src)] + if alias.InexactOverlap(dst, src) { + panic("chacha20: invalid buffer overlap") + } + + // First, drain any remaining key stream from a previous XORKeyStream. + if s.len != 0 { + keyStream := s.buf[bufSize-s.len:] + if len(src) < len(keyStream) { + keyStream = keyStream[:len(src)] + } + _ = src[len(keyStream)-1] // bounds check elimination hint + for i, b := range keyStream { + dst[i] = src[i] ^ b + } + s.len -= len(keyStream) + dst, src = dst[len(keyStream):], src[len(keyStream):] + } + if len(src) == 0 { + return + } + + // If we'd need to let the counter overflow and keep generating output, + // panic immediately. If instead we'd only reach the last block, remember + // not to generate any more output after the buffer is drained. + numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize + if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { + panic("chacha20: counter overflow") + } else if uint64(s.counter)+numBlocks == 1<<32 { + s.overflow = true + } + + // xorKeyStreamBlocks implementations expect input lengths that are a + // multiple of bufSize. Platform-specific ones process multiple blocks at a + // time, so have bufSizes that are a multiple of blockSize. + + full := len(src) - len(src)%bufSize + if full > 0 { + s.xorKeyStreamBlocks(dst[:full], src[:full]) + } + dst, src = dst[full:], src[full:] + + // If using a multi-block xorKeyStreamBlocks would overflow, use the generic + // one that does one block at a time. + const blocksPerBuf = bufSize / blockSize + if uint64(s.counter)+blocksPerBuf > 1<<32 { + s.buf = [bufSize]byte{} + numBlocks := (len(src) + blockSize - 1) / blockSize + buf := s.buf[bufSize-numBlocks*blockSize:] + copy(buf, src) + s.xorKeyStreamBlocksGeneric(buf, buf) + s.len = len(buf) - copy(dst, buf) + return + } + + // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and + // keep the leftover keystream for the next XORKeyStream invocation. 
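+	// As a worked example of this last step (illustrative numbers, assuming
+	// the 256-byte bufSize used by the s390x and ppc64le backends): with 300
+	// bytes of src, the full-buffer path above consumed 256 bytes, leaving
+	// 44. Those 44 bytes are copied into the zeroed buf, one whole buffer of
+	// key stream is generated in place, the 44 encrypted bytes are copied
+	// out to dst, and s.len ends up at 212, so the unused key stream is
+	// drained first by the next XORKeyStream call.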
+ if len(src) > 0 { + s.buf = [bufSize]byte{} + copy(s.buf[:], src) + s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) + s.len = bufSize - copy(dst, s.buf[:]) + } +} + +func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { + if len(dst) != len(src) || len(dst)%blockSize != 0 { + panic("chacha20: internal error: wrong dst and/or src length") + } + + // To generate each block of key stream, the initial cipher state + // (represented below) is passed through 20 rounds of shuffling, + // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) + // or by diagonals (like 1, 6, 11, 12). + // + // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc + // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk + // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk + // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn + // + // c=constant k=key b=blockcount n=nonce + var ( + c0, c1, c2, c3 = j0, j1, j2, j3 + c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] + c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] + _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] + ) + + // Three quarters of the first round don't depend on the counter, so we can + // calculate them here, and reuse them for multiple blocks in the loop, and + // for future XORKeyStream invocations. + if !s.precompDone { + s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) + s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) + s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) + s.precompDone = true + } + + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { + // The remainder of the first column round. + fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) + + // The second diagonal round. + x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) + x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) + x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) + x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) + + // The remaining 18 rounds. + for i := 0; i < 9; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. + addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) + + s.counter += 1 + + src, dst = src[blockSize:], dst[blockSize:] + } +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes +// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other +// length. It is used as part of the XChaCha20 construction. +func HChaCha20(key, nonce []byte) ([]byte, error) { + // This function is split into a wrapper so that the slice allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + out := make([]byte, 32) + return hChaCha20(out, key, nonce) +} + +func hChaCha20(out, key, nonce []byte) ([]byte, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong HChaCha20 key size") + } + if len(nonce) != 16 { + return nil, errors.New("chacha20: wrong HChaCha20 nonce size") + } + + x0, x1, x2, x3 := j0, j1, j2, j3 + x4 := binary.LittleEndian.Uint32(key[0:4]) + x5 := binary.LittleEndian.Uint32(key[4:8]) + x6 := binary.LittleEndian.Uint32(key[8:12]) + x7 := binary.LittleEndian.Uint32(key[12:16]) + x8 := binary.LittleEndian.Uint32(key[16:20]) + x9 := binary.LittleEndian.Uint32(key[20:24]) + x10 := binary.LittleEndian.Uint32(key[24:28]) + x11 := binary.LittleEndian.Uint32(key[28:32]) + x12 := binary.LittleEndian.Uint32(nonce[0:4]) + x13 := binary.LittleEndian.Uint32(nonce[4:8]) + x14 := binary.LittleEndian.Uint32(nonce[8:12]) + x15 := binary.LittleEndian.Uint32(nonce[12:16]) + + for i := 0; i < 10; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + _ = out[31] // bounds check elimination hint + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x12) + binary.LittleEndian.PutUint32(out[20:24], x13) + binary.LittleEndian.PutUint32(out[24:28], x14) + binary.LittleEndian.PutUint32(out[28:32], x15) + return out, nil +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go new file mode 100644 index 000000000..db42e6676 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego + +package chacha20 + +const bufSize = blockSize + +func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { + s.xorKeyStreamBlocksGeneric(dst, src) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go new file mode 100644 index 000000000..3a4287f99 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
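+
+// A minimal usage sketch for the package API defined in chacha_generic.go
+// (hypothetical buffers; error handling elided). A 24-byte nonce selects the
+// XChaCha20 path, which derives a subkey via HChaCha20 exactly as
+// newUnauthenticatedCipher does internally:
+//
+//	key := make([]byte, chacha20.KeySize)
+//	nonce := make([]byte, chacha20.NonceSizeX) // 24 bytes => XChaCha20
+//	c, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
+//	msg := []byte("example plaintext")
+//	dst := make([]byte, len(msg))
+//	c.XORKeyStream(dst, msg)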
+ +//go:build gc && !purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s new file mode 100644 index 000000000..66aebae25 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -0,0 +1,449 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Code for the perl script that generates the ppc64 assembler +// can be found in the cryptogams repository at the link below. It is based on +// the original from openssl. + +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 + +// The differences in this and the original implementation are +// due to the calling conventions and initialization of constants. + +//go:build gc && !purego + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 +#define TMP R15 + +#define CONSTBASE R16 +#define BLOCKS R17 + +DATA consts<>+0x00(SB)/8, $0x3320646e61707865 +DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 +DATA consts<>+0x10(SB)/8, $0x0000000000000001 +DATA consts<>+0x18(SB)/8, $0x0000000000000000 +DATA consts<>+0x20(SB)/8, $0x0000000000000004 +DATA consts<>+0x28(SB)/8, $0x0000000000000000 +DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA consts<>+0x38(SB)/8, $0x0203000106070405 +DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA consts<>+0x48(SB)/8, $0x0102030005060704 +DATA consts<>+0x50(SB)/8, $0x6170786561707865 +DATA consts<>+0x58(SB)/8, $0x6170786561707865 +DATA consts<>+0x60(SB)/8, $0x3320646e3320646e +DATA consts<>+0x68(SB)/8, $0x3320646e3320646e +DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x90(SB)/8, $0x0000000100000000 +DATA consts<>+0x98(SB)/8, $0x0000000300000002 +GLOBL consts<>(SB), RODATA, $0xa0 + +//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) +TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + // Addressing for constants + MOVD $consts<>+0x00(SB), CONSTBASE + MOVD $16, R8 + MOVD $32, R9 + MOVD $48, R10 + MOVD $64, R11 + SRD $6, LEN, BLOCKS + // V16 + LXVW4X (CONSTBASE)(R0), VS48 + ADD $80,CONSTBASE + + // Load key into V17,V18 + LXVW4X (KEY)(R0), VS49 + LXVW4X (KEY)(R8), VS50 + + // Load CNT, NONCE into V19 + LXVW4X (CNT)(R0), VS51 + + // Clear V27 + VXOR V27, V27, V27 + + // V28 + LXVW4X (CONSTBASE)(R11), VS60 + + // splat slot from V19 -> V26 + VSPLTW $0, V19, V26 + + VSLDOI $4, V19, V27, V19 + VSLDOI 
$12, V27, V19, V19 + + VADDUWM V26, V28, V26 + + MOVD $10, R14 + MOVD R14, CTR + +loop_outer_vsx: + // V0, V1, V2, V3 + LXVW4X (R0)(CONSTBASE), VS32 + LXVW4X (R8)(CONSTBASE), VS33 + LXVW4X (R9)(CONSTBASE), VS34 + LXVW4X (R10)(CONSTBASE), VS35 + + // splat values from V17, V18 into V4-V11 + VSPLTW $0, V17, V4 + VSPLTW $1, V17, V5 + VSPLTW $2, V17, V6 + VSPLTW $3, V17, V7 + VSPLTW $0, V18, V8 + VSPLTW $1, V18, V9 + VSPLTW $2, V18, V10 + VSPLTW $3, V18, V11 + + // VOR + VOR V26, V26, V12 + + // splat values from V19 -> V13, V14, V15 + VSPLTW $1, V19, V13 + VSPLTW $2, V19, V14 + VSPLTW $3, V19, V15 + + // splat const values + VSPLTISW $-16, V27 + VSPLTISW $12, V28 + VSPLTISW $8, V29 + VSPLTISW $7, V30 + +loop_vsx: + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + VRLW V15, V27, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V28, V4 + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + VRLW V15, V29, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V30, V4 + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V27, V15 + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + VRLW V4, V28, V4 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V29, V15 + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + VRLW V4, V30, V4 + BC 16, LT, loop_vsx + + VADDUWM V12, V26, V12 + + WORD $0x13600F8C // VMRGEW V0, V1, V27 + WORD $0x13821F8C // VMRGEW V2, V3, V28 + + WORD $0x10000E8C // VMRGOW V0, V1, V0 + WORD $0x10421E8C // VMRGOW V2, V3, V2 + + WORD $0x13A42F8C // VMRGEW V4, V5, V29 + WORD $0x13C63F8C // VMRGEW V6, V7, V30 + + XXPERMDI VS32, VS34, $0, VS33 + XXPERMDI VS32, VS34, $3, VS35 + XXPERMDI VS59, VS60, $0, VS32 + XXPERMDI VS59, VS60, $3, VS34 + + WORD $0x10842E8C // VMRGOW V4, V5, V4 + WORD $0x10C63E8C // VMRGOW V6, V7, V6 + + WORD $0x13684F8C // VMRGEW V8, V9, V27 + WORD $0x138A5F8C // VMRGEW V10, V11, V28 + + XXPERMDI VS36, VS38, $0, VS37 + XXPERMDI VS36, VS38, $3, VS39 + XXPERMDI VS61, VS62, $0, VS36 + XXPERMDI VS61, VS62, $3, VS38 + + WORD $0x11084E8C // VMRGOW V8, V9, V8 + WORD $0x114A5E8C // VMRGOW V10, V11, V10 + + WORD 
$0x13AC6F8C // VMRGEW V12, V13, V29 + WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + + XXPERMDI VS40, VS42, $0, VS41 + XXPERMDI VS40, VS42, $3, VS43 + XXPERMDI VS59, VS60, $0, VS40 + XXPERMDI VS59, VS60, $3, VS42 + + WORD $0x118C6E8C // VMRGOW V12, V13, V12 + WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + + VSPLTISW $4, V27 + VADDUWM V26, V27, V26 + + XXPERMDI VS44, VS46, $0, VS45 + XXPERMDI VS44, VS46, $3, VS47 + XXPERMDI VS61, VS62, $0, VS44 + XXPERMDI VS61, VS62, $3, VS46 + + VADDUWM V0, V16, V0 + VADDUWM V4, V17, V4 + VADDUWM V8, V18, V8 + VADDUWM V12, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + // Bottom of loop + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V1, V16, V0 + VADDUWM V5, V17, V4 + VADDUWM V9, V18, V8 + VADDUWM V13, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + VXOR V27, V0, V27 + + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V2, V16, V0 + VADDUWM V6, V17, V4 + VADDUWM V10, V18, V8 + VADDUWM V14, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V3, V16, V0 + VADDUWM V7, V17, V4 + VADDUWM V11, V18, V8 + VADDUWM V15, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + + MOVD $10, R14 + MOVD R14, CTR + BNE loop_outer_vsx + +done_vsx: + // Increment counter by number of 64 byte blocks + MOVD (CNT), R14 + ADD BLOCKS, R14 + MOVD R14, (CNT) + RET + +tail_vsx: + ADD $32, R1, R11 + MOVD LEN, CTR + + // Save values on stack to copy from + STXVW4X VS32, (R11)(R0) + STXVW4X VS36, (R11)(R8) + STXVW4X VS40, (R11)(R9) + STXVW4X VS44, (R11)(R10) + ADD $-1, R11, R12 + ADD $-1, INP + ADD $-1, OUT + +looptail_vsx: + // Copying the result to OUT + // in bytes. + MOVBZU 1(R12), KEY + MOVBZU 1(INP), TMP + XOR KEY, TMP, KEY + MOVBU KEY, 1(OUT) + BC 16, LT, looptail_vsx + + // Clear the stack values + STXVW4X VS48, (R11)(R0) + STXVW4X VS48, (R11)(R8) + STXVW4X VS48, (R11)(R9) + STXVW4X VS48, (R11)(R10) + BR done_vsx diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go new file mode 100644 index 000000000..683ccfd1c --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package chacha20 + +import "golang.org/x/sys/cpu" + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. Implementation in asm_s390x.s. +// +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + if cpu.S390X.HasVX { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } else { + c.xorKeyStreamBlocksGeneric(dst, src) + } +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s new file mode 100644 index 000000000..1eda91a3d --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -0,0 +1,224 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. + +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ 
+ VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // setup + MOVD $95, R0 + VLM (R5), KEY0, KEY1 + VLL R0, (R6), NONCE + VZERO M0 + VLEIB $7, $32, M0 + VSRLB M0, NONCE, NONCE + + // initialize counter values + VLREPF (R7), CTR + VZERO INC + VLEIF $1, $1, INC + VLEIF $2, $2, INC + VLEIF $3, $3, INC + VAF INC, CTR, CTR + VREPIF $4, INC + +chacha: + VREPF $0, J0, X0 + VREPF $1, J0, X1 + VREPF $2, J0, X2 + VREPF $3, J0, X3 + VREPF $0, KEY0, X4 + VREPF $1, KEY0, X5 + VREPF $2, KEY0, X6 + VREPF $3, KEY0, X7 + VREPF $0, KEY1, X8 + VREPF $1, KEY1, X9 + VREPF $2, KEY1, X10 + VREPF $3, KEY1, X11 + VLR CTR, X12 + VREPF $1, NONCE, X13 + VREPF $2, NONCE, X14 + VREPF $3, NONCE, X15 + + MOVD $(NUM_ROUNDS/2), R1 + +loop: + ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) + ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) + + ADD $-1, R1 + BNE loop + + // decrement length + ADD $-256, R4 + + // rearrange vectors + SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) + ADDV(J0, X0, X1, X2, X3) + SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) + ADDV(KEY0, X4, X5, X6, X7) + SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) + ADDV(KEY1, X8, X9, X10, X11) + VAF CTR, X12, X12 + SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) + ADDV(NONCE, X12, X13, X14, X15) + + // increment counters + VAF INC, CTR, CTR + + // xor keystream with plaintext + XORV(0*64, R2, R3, X0, X4, X8, X12) + XORV(1*64, R2, R3, X1, X5, X9, X13) + XORV(2*64, R2, R3, X2, X6, X10, X14) + XORV(3*64, R2, R3, X3, X7, X11, X15) + + // increment pointers + MOVD $256(R2), R2 + MOVD $256(R3), R3 + + CMPBNE R4, $0, chacha + + VSTEF $0, CTR, (R7) + RET diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20/xor.go b/event-exporter/vendor/golang.org/x/crypto/chacha20/xor.go new file mode 100644 index 000000000..c2d04851e --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20/xor.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20 + +import "runtime" + +// Platforms that have fast unaligned 32-bit little endian accesses.
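+
+// (For reference, in either branch of addXor below, addXor(dst, src, a, b)
+// computes the equivalent of this one-line sketch, spelled out with
+// encoding/binary purely for clarity:
+//
+//	binary.LittleEndian.PutUint32(dst, binary.LittleEndian.Uint32(src)^(a+b))
+//
+// The two hand-written variants exist so that both fast-unaligned and
+// strict-alignment GOARCHes compile to good load/store sequences.)
+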
+const unaligned = runtime.GOARCH == "386" || + runtime.GOARCH == "amd64" || + runtime.GOARCH == "arm64" || + runtime.GOARCH == "ppc64le" || + runtime.GOARCH == "s390x" + +// addXor reads a little endian uint32 from src, XORs it with (a + b) and +// places the result in little endian byte order in dst. +func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint + if unaligned { + // The compiler should optimize this code into + // 32-bit unaligned little endian loads and stores. + // TODO: delete once the compiler does a reliably + // good job with the generic code below. + // See issue #25111 for more details. + v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= a + b + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) + } +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 000000000..93da7322b --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,98 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its +// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and +// draft-irtf-cfrg-xchacha-01. +package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 + + // Overhead is the size of the Poly1305 authentication tag, and the + // difference between a ciphertext length and its plaintext. + Overhead = 16 +) + +type chacha20poly1305 struct { + key [KeySize]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
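+// A minimal usage sketch (illustrative values; error handling elided):
+//
+//	key := make([]byte, chacha20poly1305.KeySize)
+//	aead, _ := chacha20poly1305.New(key)
+//	nonce := make([]byte, aead.NonceSize())
+//	ct := aead.Seal(nil, nonce, []byte("hello"), nil) // len(ct) = 5 + Overhead
+//	pt, err := aead.Open(nil, nonce, ct, nil)         // pt == "hello", err == nil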
+func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return Overhead +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 000000000..50695a14f --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/alias" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. See +// https://tools.ietf.org/html/rfc7539#section-2.3. 
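+// The resulting 4x4 state matrix (one uint32 per cell, row-major) is:
+//
+//	const const const const
+//	key   key   key   key
+//	key   key   key   key
+//	ctr=0 nonce nonce nonce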
+func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = binary.LittleEndian.Uint32(key[0:4]) + state[5] = binary.LittleEndian.Uint32(key[4:8]) + state[6] = binary.LittleEndian.Uint32(key[8:12]) + state[7] = binary.LittleEndian.Uint32(key[12:16]) + state[8] = binary.LittleEndian.Uint32(key[16:20]) + state[9] = binary.LittleEndian.Uint32(key[20:24]) + state[10] = binary.LittleEndian.Uint32(key[24:28]) + state[11] = binary.LittleEndian.Uint32(key[28:32]) + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[0:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s new file mode 100644 index 000000000..731d2ac6d --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -0,0 +1,2715 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
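+//
+// For orientation: the code below derives the 32-byte Poly1305 key from the
+// first block of ChaCha20 key stream and clamps r in place. In Go terms the
+// clamp is roughly (sketch; r0/r1 are the low/high 8 bytes of r):
+//
+//	r0 &= 0x0FFFFFFC0FFFFFFF
+//	r1 &= 0x0FFFFFFC0FFFFFFC
+//
+// which is exactly what the ·polyClampMask<> constant below encodes.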
+ +//go:build gc && !purego + +#include "textflag.h" +// General register allocation +#define oup DI +#define inp SI +#define inl BX +#define adp CX // free to reuse, after we hash the additional data +#define keyp R8 // free to reuse, when we copy the key to stack +#define itr2 R9 // general iterator +#define itr1 CX // general iterator +#define acc0 R10 +#define acc1 R11 +#define acc2 R12 +#define t0 R13 +#define t1 R14 +#define t2 R15 +#define t3 R8 +// Register and stack allocation for the SSE code +#define rStore (0*16)(BP) +#define sStore (1*16)(BP) +#define state1Store (2*16)(BP) +#define state2Store (3*16)(BP) +#define tmpStore (4*16)(BP) +#define ctr0Store (5*16)(BP) +#define ctr1Store (6*16)(BP) +#define ctr2Store (7*16)(BP) +#define ctr3Store (8*16)(BP) +#define A0 X0 +#define A1 X1 +#define A2 X2 +#define B0 X3 +#define B1 X4 +#define B2 X5 +#define C0 X6 +#define C1 X7 +#define C2 X8 +#define D0 X9 +#define D1 X10 +#define D2 X11 +#define T0 X12 +#define T1 X13 +#define T2 X14 +#define T3 X15 +#define A3 T0 +#define B3 T1 +#define C3 T2 +#define D3 T3 +// Register and stack allocation for the AVX2 code +#define rsStoreAVX2 (0*32)(BP) +#define state1StoreAVX2 (1*32)(BP) +#define state2StoreAVX2 (2*32)(BP) +#define ctr0StoreAVX2 (3*32)(BP) +#define ctr1StoreAVX2 (4*32)(BP) +#define ctr2StoreAVX2 (5*32)(BP) +#define ctr3StoreAVX2 (6*32)(BP) +#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack +#define AA0 Y0 +#define AA1 Y5 +#define AA2 Y6 +#define AA3 Y7 +#define BB0 Y14 +#define BB1 Y9 +#define BB2 Y10 +#define BB3 Y11 +#define CC0 Y12 +#define CC1 Y13 +#define CC2 Y8 +#define CC3 Y15 +#define DD0 Y4 +#define DD1 Y1 +#define DD2 Y2 +#define DD3 Y3 +#define TT0 DD3 +#define TT1 AA3 +#define TT2 BB3 +#define TT3 CC3 +// ChaCha20 constants +DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff +DATA 
·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). +#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; 
BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 + +// Some macros + +// ROL rotates the uint32s in register R left by N bits, using temporary T. +#define ROL(N, R, T) \ + MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R + +// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL16(R, T) PSHUFB ·rol16<>(SB), R +#else +#define ROL16(R, T) ROL(16, R, T) +#endif + +// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL8(R, T) PSHUFB ·rol8<>(SB), R +#else +#define ROL8(R, T) ROL(8, R, T) +#endif + +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; ROL16(D, T) \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; ROL8(D, T) \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + 
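+	// Each iteration below absorbs one 16-byte block of additional data:
+	// polyAdd adds the block plus the 2^128 pad bit (the trailing ADCQ $1)
+	// into acc0..acc2, then polyMul multiplies the accumulator by the
+	// clamped r, which it reads from rStore at 0(BP)/8(BP).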
// Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t0, t1 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + 
DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; 
shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + +openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, 
itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes 
- prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + +openSSETail256Loop: + // This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE
$0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Let's just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + 
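// Reference: each chachaQR/chachaQR_AVX2 invocation in these loops computes one ChaCha20
// quarter round on four blocks in parallel (the 16- and 8-bit rotations via VPSHUFB with
// rol16/rol8, the 12- and 7-bit rotations via VPSLLD+VPSRLD pairs). A minimal scalar
// sketch in Go, assuming only math/bits (an illustration, not part of this patch):
//
//	func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
//		a += b; d ^= a; d = bits.RotateLeft32(d, 16)
//		c += d; b ^= c; b = bits.RotateLeft32(b, 12)
//		a += b; d ^= a; d = bits.RotateLeft32(d, 8)
//		c += d; b ^= c; b = bits.RotateLeft32(b, 7)
//		return a, b, c, d
//	}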
VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, 
CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ inl, $384 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // 
Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// 
---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + 
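// Reference: each polyAdd/polyMulAVX2 pair below performs one Poly1305 block step over the
// prime p = 2^130 - 5. A minimal sketch in Go using math/big, assuming acc, block, r, p are
// *big.Int and hiBit = 2^128 for a full 16-byte block (an illustration, not part of this patch):
//
//	acc.Add(acc, block)
//	acc.Add(acc, hiBit) // the "ADCQ $1, acc2" steps elsewhere add this bit
//	acc.Mul(acc, r)
//	acc.Mod(acc, p) // i.e. acc = (acc + block + 2^128) * r mod (2^130 - 5)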
CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD
state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + +openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, 
BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR 
(0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + 
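// Note: block 0's C0/D0 are deliberately not finalized below - only A0|B0 are needed, since
// the first 32 bytes of block-0 keystream become the Poly1305 key. polyClampMask applies the
// RFC 8439 clamp to the r half; a sketch in Go over the 16-byte little-endian key half r
// (an illustration, not part of this patch):
//
//	r[3] &= 15; r[7] &= 15; r[11] &= 15; r[15] &= 15 // clear top 4 bits of each 32-bit word
//	r[4] &= 252; r[8] &= 252; r[12] &= 252           // clear low 2 bits of words 1..3
//
// which is equivalent to r &= 0x0ffffffc0ffffffc0ffffffc0fffffff.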
PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU 
A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU 
(1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right;
shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for encryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid reading after the end of the buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA
DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR 
(1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA 
CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, 
DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, 
itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + // Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, 
TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +sealAVX2Tail128: + // Need to encrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +sealAVX2Tail256: + // Need to encrypt up to 256 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA
state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +sealAVX2Tail384: + // Need to encrypt up to 384 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0;
VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA + DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +sealAVX2Tail512: + // Need to encrypt up to 512 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB
·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, 
DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 000000000..6313898f0 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
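For orientation, the interleaved VPADDD/VPXOR/VPSHUFB and VPSLLD/VPSRLD runs throughout the sealing code above implement the ChaCha20 quarter round, fused with Poly1305 multiply stages so that hashing keeps pace with encryption. A minimal scalar Go sketch of just the quarter round (rotl32 and quarterRound are illustrative names, not part of this patch), checked against the quarter-round test vector in RFC 8439, section 2.1.1:

    package main

    import "fmt"

    func rotl32(x uint32, n uint) uint32 { return x<<n | x>>(32-n) }

    // quarterRound is the scalar form of one chachaQR_AVX2 step: the same
    // add/xor/rotate-by-16,12,8,7 sequence that the rol16/rol8 shuffles and
    // the VPSLLD/VPSRLD pairs above compute on four blocks at a time.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
        a += b
        d = rotl32(d^a, 16)
        c += d
        b = rotl32(b^c, 12)
        a += b
        d = rotl32(d^a, 8)
        c += d
        b = rotl32(b^c, 7)
        return a, b, c, d
    }

    func main() {
        // RFC 8439, section 2.1.1 test vector; expected output is
        // ea2a92f4 cb1cf8ce 4581472e 5881c4bb.
        a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
        fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
    }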
+ +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" +) + +func writeWithPadding(p *poly1305.MAC, b []byte) { + p.Write(b) + if rem := len(b) % 16; rem != 0 { + var buf [16]byte + padLen := 16 - rem + p.Write(buf[:padLen]) + } +} + +func writeUint64(p *poly1305.MAC, n int) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], uint64(n)) + p.Write(buf[:]) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + s.XORKeyStream(ciphertext, plaintext) + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(plaintext)) + p.Sum(tag[:0]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + tag := ciphertext[len(ciphertext)-16:] + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(ciphertext)) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !p.Verify(tag) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 000000000..34e6ab1df --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go new file mode 100644 index 000000000..1cebfe946 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
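The sealGeneric and openGeneric fallbacks above derive the Poly1305 key from counter block zero and then encrypt with the counter starting at one; the xchacha20poly1305.go file that follows adds NewX for 24-byte nonces that are safe to generate randomly. A hedged sketch of how the package's public AEAD API is typically used (the key, nonce, and message values are illustrative, not from the patch):

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/chacha20poly1305"
    )

    func main() {
        key := make([]byte, chacha20poly1305.KeySize) // 256-bit key
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        aead, err := chacha20poly1305.New(key)
        if err != nil {
            panic(err)
        }
        // The 12-byte nonce must never repeat for the same key.
        nonce := make([]byte, aead.NonceSize())
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }
        ct := aead.Seal(nil, nonce, []byte("hello"), []byte("header"))
        pt, err := aead.Open(nil, nonce, ct, []byte("header"))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", pt) // hello
    }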
+ +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" + + "golang.org/x/crypto/chacha20" +) + +type xchacha20poly1305 struct { + key [KeySize]byte +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. +func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return Overhead +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..2492f796a --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. 
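AddASN1Int64 and the related methods delegate to addASN1Signed, which emits the value as a minimal-length, big-endian two's-complement body. A standalone sketch of that sizing rule (derInt is a hypothetical helper mirroring the loops in addASN1Signed below, not the package API):

    package main

    import "fmt"

    // derInt sizes the minimal two's-complement body, then emits it
    // big-endian, one octet at a time.
    func derInt(v int64) []byte {
        length := 1
        for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
            length++
        }
        out := make([]byte, 0, length)
        for ; length > 0; length-- {
            out = append(out, byte(v>>uint((length-1)*8)))
        }
        return out
    }

    func main() {
        fmt.Printf("% x\n", derInt(127))  // 7f
        fmt.Printf("% x\n", derInt(128))  // 00 80
        fmt.Printf("% x\n", derInt(-129)) // ff 7f
    }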
+func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
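These Builder methods compose: AddASN1 (defined later in this file) reserves a length prefix and hands a child builder to a continuation, so nested DER structures are built without computing lengths up front. A small usage sketch assuming the documented cryptobyte API (the values are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
        "golang.org/x/crypto/cryptobyte/asn1"
    )

    func main() {
        var b cryptobyte.Builder
        b.AddASN1(asn1.SEQUENCE, func(c *cryptobyte.Builder) {
            c.AddASN1Int64(42)
            c.AddASN1OctetString([]byte("hi"))
        })
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", der) // 30 07 02 01 2a 04 02 68 69
    }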
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation in out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful.
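AddASN1ObjectIdentifier above packs the first two OID arcs as 40*arc1+arc2 and writes every arc with addBase128Int: seven bits per octet, with the continuation bit 0x80 set on all but the last octet. A standalone sketch (base128 is a hypothetical mirror of addBase128Int, not the package API):

    package main

    import "fmt"

    // base128 encodes n in base 128, most significant group first, with
    // bit 8 set on every octet except the last.
    func base128(n int64) []byte {
        var length int
        if n == 0 {
            length = 1
        } else {
            for i := n; i > 0; i >>= 7 {
                length++
            }
        }
        out := make([]byte, 0, length)
        for i := length - 1; i >= 0; i-- {
            o := byte(n>>uint(i*7)) & 0x7f
            if i != 0 {
                o |= 0x80
            }
            out = append(out, o)
        }
        return out
    }

    func main() {
        // The OID 1.2.840... begins with arcs 1 and 2 packed as 40*1+2 = 42
        // (0x2a), then 840 encoded base-128 as 0x86 0x48.
        fmt.Printf("% x, % x\n", base128(42), base128(840))
    }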
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
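asn1Signed above accumulates at most eight big-endian octets into an int64 and then sign-extends by shifting the value up to bit 63 and arithmetically back down. The trick in isolation (the byte values are illustrative):

    package main

    import "fmt"

    func main() {
        n := []byte{0xff, 0x7f} // two's-complement -129, big-endian
        var out int64
        for _, b := range n {
            out = out<<8 | int64(b)
        }
        // Shift up and down to sign-extend, as asn1Signed does.
        out <<= 64 - uint8(len(n))*8
        out >>= 64 - uint8(len(n))*8
        fmt.Println(out) // -129
    }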
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := bytes[0] + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 000000000..cda8e3edf --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. 
+const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/event-exporter/vendor/golang.org/x/crypto/cryptobyte/builder.go b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..cf254f5f1 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. 
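The fixed-width Add helpers above split values into big-endian bytes, and the zero-value Builder appends to a buffer that grows as needed. A minimal sketch assuming the documented API:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder // the zero value grows its buffer as needed
        b.AddUint8(0x01)
        b.AddUint16(0x0203)
        b.AddUint24(0x040506)
        out, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", out) // 01 02 03 04 05 06
    }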
+func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. 
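A type can implement Marshal to serialize itself through Builder.AddValue, defined just below. A hedged sketch (the point type is hypothetical, not part of the package):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    // point writes itself as two big-endian 16-bit fields.
    type point struct{ x, y uint16 }

    func (p point) Marshal(b *cryptobyte.Builder) error {
        b.AddUint16(p.x)
        b.AddUint16(p.y)
        return nil
    }

    func main() {
        var b cryptobyte.Builder
        b.AddValue(point{1, 2})
        out, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", out) // 00 01 00 02
    }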
+type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/event-exporter/vendor/golang.org/x/crypto/cryptobyte/string.go b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 000000000..10692a8a3 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It provides helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n bytes and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint48(out *uint64) bool {
+	v := s.read(6)
+	if v == nil {
+		return false
+	}
+	*out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
+	return true
+}
+
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint64(out *uint64) bool {
+	v := s.read(8)
+	if v == nil {
+		return false
+	}
+	*out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
+	return true
+}
+
+func (s *String) readUnsigned(out *uint32, length int) bool {
+	v := s.read(length)
+	if v == nil {
+		return false
+	}
+	var result uint32
+	for i := 0; i < length; i++ {
+		result <<= 8
+		result |= uint32(v[i])
+	}
+	*out = result
+	return true
+}
+
+func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
+	lenBytes := s.read(lenLen)
+	if lenBytes == nil {
+		return false
+	}
+	var length uint32
+	for _, b := range lenBytes {
+		length = length << 8
+		length = length | uint32(b)
+	}
+	v := s.read(int(length))
+	if v == nil {
+		return false
+	}
+	*outChild = v
+	return true
+}
+
+// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
+// into out and advances over it. It reports whether the read was successful.
+func (s *String) ReadUint8LengthPrefixed(out *String) bool {
+	return s.readLengthPrefixed(1, out)
+}
+
+// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
+// length-prefixed value into out and advances over it. It reports whether the
+// read was successful.
+func (s *String) ReadUint16LengthPrefixed(out *String) bool {
+	return s.readLengthPrefixed(2, out)
+}
+
+// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
+// length-prefixed value into out and advances over it. It reports whether
+// the read was successful.
+func (s *String) ReadUint24LengthPrefixed(out *String) bool {
+	return s.readLengthPrefixed(3, out)
+}
+
+// ReadBytes reads n bytes into out and advances over them. It reports
+// whether the read was successful.
+func (s *String) ReadBytes(out *[]byte, n int) bool {
+	v := s.read(n)
+	if v == nil {
+		return false
+	}
+	*out = v
+	return true
+}
+
+// CopyBytes copies len(out) bytes into out and advances over them. It reports
+// whether the copy operation was successful.
+func (s *String) CopyBytes(out []byte) bool {
+	n := len(out)
+	v := s.read(n)
+	if v == nil {
+		return false
+	}
+	return copy(out, v) == n
+}
+
+// Empty reports whether the string does not contain any bytes.
+func (s String) Empty() bool {
+	return len(s) == 0
+}
diff --git a/event-exporter/vendor/golang.org/x/crypto/hkdf/hkdf.go b/event-exporter/vendor/golang.org/x/crypto/hkdf/hkdf.go
new file mode 100644
index 000000000..f4ded5fee
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
+// Function (HKDF) as defined in RFC 5869.
+//
+// HKDF is a cryptographic key derivation function (KDF) with the goal of
+// expanding limited input keying material into one or more cryptographically
+// strong secret keys.
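+//
+// A minimal usage sketch (masterSecret, salt, and the "example v1" label are
+// illustrative, not part of this package; New and Extract are defined below):
+//
+//	kdf := hkdf.New(sha256.New, masterSecret, salt, []byte("example v1"))
+//	key := make([]byte, 32)
+//	if _, err := io.ReadFull(kdf, key); err != nil {
+//		// Reads fail once 255 hash-sized blocks have been produced.
+//	}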
+package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. +func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + if f.counter > 1 { + f.expander.Reset() + } + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias.go b/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias.go new file mode 100644 index 000000000..551ff0c35 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. 
Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go new file mode 100644 index 000000000..6fe61b5c6 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go new file mode 100644 index 000000000..333da285b --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego + +package poly1305 + +type mac struct{ macGeneric } diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go new file mode 100644 index 000000000..4aaea810a --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. 
Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
+package poly1305
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	h := New(key)
+	h.Write(m)
+	h.Sum(out[:0])
+}
+
+// Verify returns true if mac is a valid authenticator for m with the given key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+	var tmp [16]byte
+	Sum(&tmp, m, key)
+	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+	m := &MAC{}
+	initialize(key, &m.macState)
+	return m
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
+//
+// MAC cannot be used like common hash.Hash implementations,
+// because using a poly1305 key twice breaks its security.
+// Therefore writing data to a running MAC after calling
+// Sum or Verify causes it to panic.
+type MAC struct {
+	mac // platform-dependent implementation
+
+	finalized bool
+}
+
+// Size returns the number of bytes Sum will return.
+func (h *MAC) Size() int { return TagSize }
+
+// Write adds more data to the running message authentication code.
+// It never returns an error.
+//
+// It must not be called after the first call of Sum or Verify.
+func (h *MAC) Write(p []byte) (n int, err error) {
+	if h.finalized {
+		panic("poly1305: write to MAC after Sum or Verify")
+	}
+	return h.mac.Write(p)
+}
+
+// Sum computes the authenticator of all data written to the
+// message authentication code.
+func (h *MAC) Sum(b []byte) []byte {
+	var mac [TagSize]byte
+	h.mac.Sum(&mac)
+	h.finalized = true
+	return append(b, mac[:]...)
+}
+
+// Verify returns whether the authenticator of all data written to
+// the message authentication code matches the expected value.
+func (h *MAC) Verify(expected []byte) bool {
+	var mac [TagSize]byte
+	h.mac.Sum(&mac)
+	h.finalized = true
+	return subtle.ConstantTimeCompare(expected, mac[:]) == 1
+}
diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
new file mode 100644
index 000000000..164cd47d3
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
@@ -0,0 +1,47 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build gc && !purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s new file mode 100644 index 000000000..e0d3c6475 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go new file mode 100644 index 000000000..ec2202bd7 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -0,0 +1,312 @@ +// Copyright 
2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides the generic implementation of Sum and MAC. Other files
+// might provide optimized assembly implementations of some of this code.
+
+package poly1305
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64-byte message is approximately
+//
+//	s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r  mod  2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+//	for len(msg) > 0:
+//		h += read(msg, 16)
+//		h *= r
+//		h %= 2¹³⁰ - 5
+//	return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+	h := newMACGeneric(key)
+	h.Write(msg)
+	h.Sum(out)
+}
+
+func newMACGeneric(key *[32]byte) macGeneric {
+	m := macGeneric{}
+	initialize(key, &m.macState)
+	return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+	// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+	// can grow larger during and after rounds. It must, however, remain below
+	// 2 * (2¹³⁰ - 5).
+	h [3]uint64
+	// r and s are the private key components.
+	r [2]uint64
+	s [2]uint64
+}
+
+type macGeneric struct {
+	macState
+
+	buffer [TagSize]byte
+	offset int
+}
+
+// Write splits the incoming message into TagSize chunks, and passes them to
+// update. It buffers incomplete chunks.
+func (h *macGeneric) Write(p []byte) (int, error) {
+	nn := len(p)
+	if h.offset > 0 {
+		n := copy(h.buffer[h.offset:], p)
+		if h.offset+n < TagSize {
+			h.offset += n
+			return nn, nil
+		}
+		p = p[n:]
+		h.offset = 0
+		updateGeneric(&h.macState, h.buffer[:])
+	}
+	if n := len(p) - (len(p) % TagSize); n > 0 {
+		updateGeneric(&h.macState, p[:n])
+		p = p[n:]
+	}
+	if len(p) > 0 {
+		h.offset += copy(h.buffer[h.offset:], p)
+	}
+	return nn, nil
+}
+
+// Sum flushes the last incomplete chunk from the buffer, if any, and generates
+// the MAC output. It does not modify its state, in order to allow for multiple
+// calls to Sum, even if no Write is allowed after Sum.
+func (h *macGeneric) Sum(out *[TagSize]byte) {
+	state := h.macState
+	if h.offset > 0 {
+		updateGeneric(&state, h.buffer[:h.offset])
+	}
+	finalize(out, &state.h, &state.s)
+}
+
+// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
+// clears some bits of the secret coefficient to make it possible to implement
+// multiplication more efficiently.
+const (
+	rMask0 = 0x0FFFFFFC0FFFFFFF
+	rMask1 = 0x0FFFFFFC0FFFFFFC
+)
+
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+	m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+	m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+	m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+	m.s[1] = binary.LittleEndian.Uint64(key[24:32])
+}
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
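+//
+// As an illustrative sketch of the pattern used below (not additional API),
+// a 64×64→128-bit multiply and a carry-propagating add of limbs a, b, x are:
+//
+//	hi, lo := bits.Mul64(a, b)     // full 128-bit product in two 64-bit halves
+//	sum, c := bits.Add64(lo, x, 0) // low 64 bits of lo+x; carry out in c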
+type uint128 struct {
+	lo, hi uint64
+}
+
+func mul64(a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	return uint128{lo, hi}
+}
+
+func add128(a, b uint128) uint128 {
+	lo, c := bits.Add64(a.lo, b.lo, 0)
+	hi, c := bits.Add64(a.hi, b.hi, c)
+	if c != 0 {
+		panic("poly1305: unexpected overflow")
+	}
+	return uint128{lo, hi}
+}
+
+func shiftRightBy2(a uint128) uint128 {
+	a.lo = a.lo>>2 | (a.hi&3)<<62
+	a.hi = a.hi >> 2
+	return a
+}
+
+// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
+// 128 bits of message, it computes
+//
+//	h₊ = (h + m) * r  mod  2¹³⁰ - 5
+//
+// If the msg length is not a multiple of TagSize, it assumes the last
+// incomplete chunk is the final one.
+func updateGeneric(state *macState, msg []byte) {
+	h0, h1, h2 := state.h[0], state.h[1], state.h[2]
+	r0, r1 := state.r[0], state.r[1]
+
+	for len(msg) > 0 {
+		var c uint64
+
+		// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
+		// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
+		// reduced at the end of the multiplication below.
+		//
+		// The spec requires us to set a bit just above the message size, not to
+		// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
+		// add 1 to the most significant (2¹²⁸) limb, h2.
+		if len(msg) >= TagSize {
+			h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+			h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+			h2 += c + 1
+
+			msg = msg[TagSize:]
+		} else {
+			var buf [TagSize]byte
+			copy(buf[:], msg)
+			buf[len(msg)] = 1
+
+			h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+			h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+			h2 += c
+
+			msg = nil
+		}
+
+		// Multiplication of big number limbs is similar to elementary school
+		// columnar multiplication. Instead of digits, there are 64-bit limbs.
+		//
+		// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
+		//
+		//	                        h2    h1    h0  x
+		//	                              r1    r0  =
+		//	                       ----------------
+		//	                      h2r0  h1r0  h0r0     <-- individual 128-bit products
+		//	            +   h2r1  h1r1  h0r1
+		//	               ------------------------
+		//	                 m3    m2    m1    m0      <-- result in 128-bit overlapping limbs
+		//	               ------------------------
+		//	         m3.hi m2.hi m1.hi m0.hi           <-- carry propagation
+		//	 +             m3.lo m2.lo m1.lo m0.lo
+		//	  -------------------------------
+		//	    t4    t3    t2    t1    t0             <-- final result in 64-bit limbs
+		//
+		// The main difference from pen-and-paper multiplication is that we do
+		// carry propagation in a separate step, as if we wrote two digit sums
+		// at first (the 128-bit limbs), and then carried the tens all at once.
+
+		h0r0 := mul64(h0, r0)
+		h1r0 := mul64(h1, r0)
+		h2r0 := mul64(h2, r0)
+		h0r1 := mul64(h0, r1)
+		h1r1 := mul64(h1, r1)
+		h2r1 := mul64(h2, r1)
+
+		// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
+		// top 4 bits cleared by rMask{0,1}, we know that their product is not going
+		// to overflow 64 bits, so we can ignore the high part of the products.
+		//
+		// This also means that the product doesn't have a fifth limb (t4).
+		if h2r0.hi != 0 {
+			panic("poly1305: unexpected overflow")
+		}
+		if h2r1.hi != 0 {
+			panic("poly1305: unexpected overflow")
+		}
+
+		m0 := h0r0
+		m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
+		m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
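+		// m3 stands alone in its column of the diagram above; its low half
+		// feeds t3 in the carry chain below.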
+		m3 := h2r1
+
+		t0 := m0.lo
+		t1, c := bits.Add64(m1.lo, m0.hi, 0)
+		t2, c := bits.Add64(m2.lo, m1.hi, c)
+		t3, _ := bits.Add64(m3.lo, m2.hi, c)
+
+		// Now we have the result as 4 64-bit limbs, and we need to reduce it
+		// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
+		// a cheap partial reduction according to the reduction identity
+		//
+		//	c * 2¹³⁰ + n  =  c * 5 + n  mod  2¹³⁰ - 5
+		//
+		// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
+		// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
+		// assumptions we make about h in the rest of the code.
+		//
+		// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
+
+		// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
+		// Note that the carry bits are effectively shifted left by 2, in other
+		// words, cc = c * 4 for the c in the reduction identity.
+		h0, h1, h2 = t0, t1, t2&maskLow2Bits
+		cc := uint128{t2 & maskNotLow2Bits, t3}
+
+		// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
+
+		h0, c = bits.Add64(h0, cc.lo, 0)
+		h1, c = bits.Add64(h1, cc.hi, c)
+		h2 += c
+
+		cc = shiftRightBy2(cc)
+
+		h0, c = bits.Add64(h0, cc.lo, 0)
+		h1, c = bits.Add64(h1, cc.hi, c)
+		h2 += c
+
+		// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
+		//
+		//	5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
+	}
+
+	state.h[0], state.h[1], state.h[2] = h0, h1, h2
+}
+
+const (
+	maskLow2Bits    uint64 = 0x0000000000000003
+	maskNotLow2Bits uint64 = ^maskLow2Bits
+)
+
+// select64 returns x if v == 1 and y if v == 0, in constant time.
+func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
+
+// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
+const (
+	p0 = 0xFFFFFFFFFFFFFFFB
+	p1 = 0xFFFFFFFFFFFFFFFF
+	p2 = 0x0000000000000003
+)
+
+// finalize completes the modular reduction of h and computes
+//
+//	out = h + s  mod  2¹²⁸
+func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
+	h0, h1, h2 := h[0], h[1], h[2]
+
+	// After the partial reduction in updateGeneric, h might be more than
+	// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
+	// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
+	// result if the subtraction underflows, and t otherwise.
+
+	hMinusP0, b := bits.Sub64(h0, p0, 0)
+	hMinusP1, b := bits.Sub64(h1, p1, b)
+	_, b = bits.Sub64(h2, p2, b)
+
+	// h = h if h < p else h - p
+	h0 = select64(b, h0, hMinusP0)
+	h1 = select64(b, h1, hMinusP1)
+
+	// Finally, we compute the last Poly1305 step
+	//
+	//	tag = h + s  mod  2¹²⁸
+	//
+	// by just doing a wide addition with the 128 low bits of h and discarding
+	// the overflow.
+	h0, c := bits.Add64(h0, s[0], 0)
+	h1, _ = bits.Add64(h1, s[1], c)
+
+	binary.LittleEndian.PutUint64(out[0:8], h0)
+	binary.LittleEndian.PutUint64(out[8:16], h1)
+}
diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
new file mode 100644
index 000000000..4aec4874b
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package poly1305
+
+//go:noescape
+func update(state *macState, msg []byte)
+
+// mac is a wrapper for macGeneric that redirects calls that would have gone to
+// updateGeneric to update.
+// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s new file mode 100644 index 000000000..d2ca5deeb --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -0,0 +1,181 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP R5, $0 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + 
MOVD (R4), R16 + + CMP R17, $0 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP R5, $0 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go new file mode 100644 index 000000000..e1d033a49 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +// +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. 
+	if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
+		updateVX(&state, remainder)
+	} else if len(remainder) > 0 {
+		updateGeneric(&state, remainder)
+	}
+	finalize(out, &state.h, &state.s)
+}
diff --git a/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
new file mode 100644
index 000000000..0fe3a7c21
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
@@ -0,0 +1,503 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+// This implementation of Poly1305 uses the vector facility (vx)
+// to process up to 2 blocks (32 bytes) per iteration using an
+// algorithm based on the one described in:
+//
+// NEON crypto, Daniel J. Bernstein & Peter Schwabe
+// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+//
+// This algorithm uses 5 26-bit limbs to represent a 130-bit
+// value. These limbs are, for the most part, zero extended and
+// placed into 64-bit vector register elements. Each vector
+// register is 128-bits wide and so holds 2 of these elements.
+// Using 26-bit limbs allows us plenty of headroom to accommodate
+// accumulations before and after multiplication without
+// overflowing either 32-bits (before multiplication) or 64-bits
+// (after multiplication).
+//
+// In order to parallelise the operations required to calculate
+// the sum we use two separate accumulators and then sum those
+// in an extra final step. For compatibility with the generic
+// implementation we perform this summation at the end of every
+// updateVX call.
+//
+// To use two accumulators we must multiply the message blocks
+// by r² rather than r. Only the final message block should be
+// multiplied by r.
+//
+// Example:
+//
+// We want to calculate the sum (h) for a 64 byte message (m):
+//
+//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
+//
+// To do this we split the calculation into the even indices
+// and odd indices of the message. These form our SIMD 'lanes':
+//
+//   h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
+//       m[16:32]r³ + m[48:64]r      <- lane 1
+//
+// To calculate this iteratively we refactor so that both lanes
+// are written in terms of r² and r:
+//
+//   h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
+//       (m[16:32]r² + m[48:64])r    <- lane 1
+//                 ^             ^
+//                 |             coefficients for second iteration
+//                 coefficients for first iteration
+//
+// So in this case we would have two iterations. In the first
+// both lanes are multiplied by r². In the second only the
+// first lane is multiplied by r² and the second lane is
+// instead multiplied by r. This gives us the odd and even
+// powers of r that we need from the original equation.
+//
+// Notation:
+//
+//   h - accumulator
+//   r - key
+//   m - message
+//
+//   [a, b]       - SIMD register holding two 64-bit values
+//   [a, b, c, d] - SIMD register holding four 32-bit values
+//   xᵢ[n]        - limb n of variable x with bit width i
+//
+// Limbs are expressed in little endian order, so for 26-bit
+// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
+// will be the least significant limb.
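+//
+// Schematically, ignoring the modulus, clamping and block padding, the
+// two-lane scheme above computes (a sketch only; acc0 and acc1 are the
+// two lane accumulators and r2 = r·r):
+//
+//   for len(msg) >= 32 {
+//       acc0 = (acc0 + block(msg[0:16])) * r2  // lane 0
+//       acc1 = (acc1 + block(msg[16:32])) * r2 // lane 1; r replaces r2 for the final pair
+//       msg = msg[32:]
+//   }
+//   h = acc0 + acc1 // lanes are summed at the end of every updateVX call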
+
+// masking constants
+#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
+#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
+
+// expansion constants (see EXPAND macro)
+#define EX0 V2
+#define EX1 V3
+#define EX2 V4
+
+// key (r², r or 1 depending on context)
+#define R_0 V5
+#define R_1 V6
+#define R_2 V7
+#define R_3 V8
+#define R_4 V9
+
+// precalculated coefficients (5r², 5r or 0 depending on context)
+#define R5_1 V10
+#define R5_2 V11
+#define R5_3 V12
+#define R5_4 V13
+
+// message block (m)
+#define M_0 V14
+#define M_1 V15
+#define M_2 V16
+#define M_3 V17
+#define M_4 V18
+
+// accumulator (h)
+#define H_0 V19
+#define H_1 V20
+#define H_2 V21
+#define H_3 V22
+#define H_4 V23
+
+// temporary registers (for short-lived values)
+#define T_0 V24
+#define T_1 V25
+#define T_2 V26
+#define T_3 V27
+#define T_4 V28
+
+GLOBL ·constants<>(SB), RODATA, $0x30
+// EX0
+DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
+DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
+// EX1
+DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
+DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
+// EX2
+DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
+DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
+
+// MULTIPLY multiplies each lane of f and g, partially reduced
+// modulo 2¹³⁰ - 5. The result, h, consists of partial products
+// in each lane that need to be reduced further to produce the
+// final result.
+//
+//   h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
+//
+// Note that the multiplication by 5 of the high bits is
+// achieved by precalculating the multiplication of four of the
+// g coefficients by 5. These are g51-g54.
+#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
+	VMLOF  f0, g0, h0        \
+	VMLOF  f0, g3, h3        \
+	VMLOF  f0, g1, h1        \
+	VMLOF  f0, g4, h4        \
+	VMLOF  f0, g2, h2        \
+	VMLOF  f1, g54, T_0      \
+	VMLOF  f1, g2, T_3       \
+	VMLOF  f1, g0, T_1       \
+	VMLOF  f1, g3, T_4       \
+	VMLOF  f1, g1, T_2       \
+	VMALOF f2, g53, h0, h0   \
+	VMALOF f2, g1, h3, h3    \
+	VMALOF f2, g54, h1, h1   \
+	VMALOF f2, g2, h4, h4    \
+	VMALOF f2, g0, h2, h2    \
+	VMALOF f3, g52, T_0, T_0 \
+	VMALOF f3, g0, T_3, T_3  \
+	VMALOF f3, g53, T_1, T_1 \
+	VMALOF f3, g1, T_4, T_4  \
+	VMALOF f3, g54, T_2, T_2 \
+	VMALOF f4, g51, h0, h0   \
+	VMALOF f4, g54, h3, h3   \
+	VMALOF f4, g52, h1, h1   \
+	VMALOF f4, g0, h4, h4    \
+	VMALOF f4, g53, h2, h2   \
+	VAG    T_0, h0, h0       \
+	VAG    T_3, h3, h3       \
+	VAG    T_1, h1, h1       \
+	VAG    T_4, h4, h4       \
+	VAG    T_2, h2, h2
+
+// REDUCE performs the following carry operations in four
+// stages, as specified in Bernstein & Schwabe:
+//
+//   1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
+//   2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
+//   3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
+//   4: h₂₆[3]->h₂₆[4]
+//
+// The result is that all of the limbs are limited to 26-bits
+// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
+//
+// Note that although each limb is aligned at 26-bit intervals
+// they may contain values that exceed 2²⁶ - 1, hence the need
+// to carry the excess bits in each limb.
+#define REDUCE(h0, h1, h2, h3, h4) \
+	VESRLG $26, h0, T_0  \
+	VESRLG $26, h3, T_1  \
+	VN     MOD26, h0, h0 \
+	VN     MOD26, h3, h3 \
+	VAG    T_0, h1, h1   \
+	VAG    T_1, h4, h4   \
+	VESRLG $26, h1, T_2  \
+	VESRLG $26, h4, T_3  \
+	VN     MOD26, h1, h1 \
+	VN     MOD26, h4, h4 \
+	VESLG  $2, T_3, T_4  \
+	VAG    T_3, T_4, T_4 \
+	VAG    T_2, h2, h2   \
+	VAG    T_4, h0, h0   \
+	VESRLG $26, h2, T_0  \
+	VESRLG $26, h0, T_1  \
+	VN     MOD26, h2, h2 \
+	VN     MOD26, h0, h0 \
+	VAG    T_0, h3, h3   \
+	VAG    T_1, h1, h1   \
+	VESRLG $26, h3, T_2  \
+	VN     MOD26, h3, h3 \
+	VAG    T_2, h4, h4
+
+// EXPAND splits the 128-bit little-endian values in0 and in1
+// into 26-bit big-endian limbs and places the results into
+// the first and second lane of d₂₆[0:4] respectively.
+//
+// The EX0, EX1 and EX2 constants are arrays of byte indices
+// for permutation. The permutation both reverses the bytes
+// in the input and ensures the bytes are copied into the
+// destination limb ready to be shifted into their final
+// position.
+#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
+	VPERM  in0, in1, EX0, d0 \
+	VPERM  in0, in1, EX1, d2 \
+	VPERM  in0, in1, EX2, d4 \
+	VESRLG $26, d0, d1       \
+	VESRLG $30, d2, d3       \
+	VESRLG $4, d2, d2        \
+	VN     MOD26, d0, d0     \ // [in0₂₆[0], in1₂₆[0]]
+	VN     MOD26, d3, d3     \ // [in0₂₆[3], in1₂₆[3]]
+	VN     MOD26, d1, d1     \ // [in0₂₆[1], in1₂₆[1]]
+	VN     MOD24, d4, d4     \ // [in0₂₆[4], in1₂₆[4]]
+	VN     MOD26, d2, d2     // [in0₂₆[2], in1₂₆[2]]
+
+// func updateVX(state *macState, msg []byte)
+TEXT ·updateVX(SB), NOSPLIT, $0
+	MOVD state+0(FP), R1
+	LMG  msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
+
+	// load EX0, EX1 and EX2
+	MOVD $·constants<>(SB), R5
+	VLM  (R5), EX0, EX2
+
+	// generate masks
+	VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
+	VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
+
+	// load h (accumulator) and r (key) from state
+	VZERO T_1               // [0, 0]
+	VL    0(R1), T_0        // [h₆₄[0], h₆₄[1]]
+	VLEG  $0, 16(R1), T_1   // [h₆₄[2], 0]
+	VL    24(R1), T_2       // [r₆₄[0], r₆₄[1]]
+	VPDI  $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
+	VPDI  $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
+
+	// unpack h and r into 26-bit limbs
+	// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
+	VN     MOD26, T_3, H_0            // [h₂₆[0], r₂₆[0]]
+	VZERO  H_1                        // [0, 0]
+	VZERO  H_3                        // [0, 0]
+	VGMG   $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
+	VESLG  $24, T_1, T_1              // [h₆₄[2]<<24, 0]
+	VERIMG $-26&63, T_3, MOD26, H_1   // [h₂₆[1], r₂₆[1]]
+	VESRLG $+52&63, T_3, H_2          // [h₂₆[2], r₂₆[2]] - low 12 bits only
+	VERIMG $-14&63, T_4, MOD26, H_3   // [h₂₆[3], r₂₆[3]]
+	VESRLG $40, T_4, H_4              // [h₂₆[4], r₂₆[4]] - low 24 bits only
+	VERIMG $+12&63, T_4, T_0, H_2     // [h₂₆[2], r₂₆[2]] - complete
+	VO     T_1, H_4, H_4              // [h₂₆[4], r₂₆[4]] - complete
+
+	// replicate r across all 4 vector elements
+	VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
+	VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
+	VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
+	VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
+	VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
+
+	// zero out lane 1 of h
+	VLEIG $1, $0, H_0 // [h₂₆[0], 0]
+	VLEIG $1, $0, H_1 // [h₂₆[1], 0]
+	VLEIG $1, $0, H_2 // [h₂₆[2], 0]
+	VLEIG $1, $0, H_3 // [h₂₆[3], 0]
+	VLEIG $1, $0, H_4 // [h₂₆[4], 0]
+
+	// calculate 5r (ignore least significant limb)
+	VREPIF $5, T_0
+	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
+	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
+	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
+	VMLF   T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
+
+	// skip r² calculation if we are only calculating one block
+	CMPBLE R3, $16, skip
+
+	// calculate r²
+	MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
+	REDUCE(M_0, M_1, M_2, M_3, M_4)
+	VGBM   $0x0f0f, T_0
+	VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
+	VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
+	VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
+	VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
+	VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
+
+	// calculate 5r² (ignore least significant limb)
+	VREPIF $5, T_0
+	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
+	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
+	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
+	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
+
+loop:
+	CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
+
+	// load next 2 blocks from message
+	VLM (R2), T_0, T_1
+
+	// update message slice
+	SUB  $32, R3
+	MOVD $32(R2), R2
+
+	// unpack message blocks into 26-bit big-endian limbs
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// add 2¹²⁸ to each message block value
+	VLEIB $4, $1, M_4
+	VLEIB $12, $1, M_4
+
+multiply:
+	// accumulate the incoming message
+	VAG H_0, M_0, M_0
+	VAG H_3, M_3, M_3
+	VAG H_1, M_1, M_1
+	VAG H_4, M_4, M_4
+	VAG H_2, M_2, M_2
+
+	// multiply the accumulator by the key coefficient
+	MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
+
+	// carry and partially reduce the partial products
+	REDUCE(H_0, H_1, H_2, H_3, H_4)
+
+	CMPBNE R3, $0, loop
+
+finish:
+	// sum lane 0 and lane 1 and put the result in lane 1
+	VZERO  T_0
+	VSUMQG H_0, T_0, H_0
+	VSUMQG H_3, T_0, H_3
+	VSUMQG H_1, T_0, H_1
+	VSUMQG H_4, T_0, H_4
+	VSUMQG H_2, T_0, H_2
+
+	// reduce again after summation
+	// TODO(mundaym): there might be a more efficient way to do this
+	// now that we only have 1 active lane. For example, we could
+	// simultaneously pack the values as we reduce them.
+	REDUCE(H_0, H_1, H_2, H_3, H_4)
+
+	// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
+	// TODO(mundaym): in testing this final carry was unnecessary.
+	// Needs a proof before it can be removed though.
+	VESRLG $26, H_1, T_1
+	VN     MOD26, H_1, H_1
+	VAQ    T_1, H_2, H_2
+	VESRLG $26, H_2, T_2
+	VN     MOD26, H_2, H_2
+	VAQ    T_2, H_3, H_3
+	VESRLG $26, H_3, T_3
+	VN     MOD26, H_3, H_3
+	VAQ    T_3, H_4, H_4
+
+	// h is now < 2(2¹³⁰ - 5)
+	// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
+	VESLG $26, H_1, H_1
+	VESLG $26, H_3, H_3
+	VO    H_0, H_1, H_0
+	VO    H_2, H_3, H_2
+	VESLG $4, H_2, H_2
+	VLEIB $7, $48, H_1
+	VSLB  H_1, H_2, H_2
+	VO    H_0, H_2, H_0
+	VLEIB $7, $104, H_1
+	VSLB  H_1, H_4, H_3
+	VO    H_3, H_0, H_0
+	VLEIB $7, $24, H_1
+	VSRLB H_1, H_4, H_1
+
+	// update state
+	VSTEG $1, H_0, 0(R1)
+	VSTEG $0, H_0, 8(R1)
+	VSTEG $1, H_1, 16(R1)
+	RET
+
+b2: // 2 or fewer blocks remaining
+	CMPBLE R3, $16, b1
+
+	// Load the 2 remaining blocks (17-32 bytes remaining).
+	MOVD $-17(R3), R0    // index of final byte to load modulo 16
+	VL   (R2), T_0       // load full 16 byte block
+	VLL  R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
+
+	// The Poly1305 algorithm requires that a 1 bit be appended to
+	// each message block. 
If the final block is less than 16 bytes
+	// long then it is easiest to insert the 1 before the message
+	// block is split into 26-bit limbs. If, on the other hand, the
+	// final message block is 16 bytes long then we append the 1 bit
+	// after expansion as normal.
+	MOVBZ  $1, R0
+	MOVD   $-16(R3), R3   // index of byte in last block to insert 1 at (could be 16)
+	CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
+	VLVGB  R3, R0, T_1    // insert 1 into the byte at index R3
+
+	// Split both blocks into 26-bit limbs in the appropriate lanes.
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// Append a 1 byte to the end of the second to last block.
+	VLEIB $4, $1, M_4
+
+	// Append a 1 byte to the end of the last block only if it is a
+	// full 16 byte block.
+	CMPBNE R3, $16, 2(PC)
+	VLEIB  $12, $1, M_4
+
+	// Finally, set up the coefficients for the final multiplication.
+	// We have previously saved r and 5r in the 32-bit even indexes
+	// of the R_[0-4] and R5_[1-4] coefficient registers.
+	//
+	// We want lane 0 to be multiplied by r² so that can be kept the
+	// same. We want lane 1 to be multiplied by r so we need to move
+	// the saved r value into the 32-bit odd index in lane 1 by
+	// rotating the 64-bit lane by 32.
+	VGBM   $0x00ff, T_0         // [0, 0xffffffffffffffff] - mask lane 1 only
+	VERIMG $32, R_0, T_0, R_0   // [_, r²₂₆[0], _, r₂₆[0]]
+	VERIMG $32, R_1, T_0, R_1   // [_, r²₂₆[1], _, r₂₆[1]]
+	VERIMG $32, R_2, T_0, R_2   // [_, r²₂₆[2], _, r₂₆[2]]
+	VERIMG $32, R_3, T_0, R_3   // [_, r²₂₆[3], _, r₂₆[3]]
+	VERIMG $32, R_4, T_0, R_4   // [_, r²₂₆[4], _, r₂₆[4]]
+	VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
+	VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
+	VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
+	VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
+
+	MOVD $0, R3
+	BR   multiply
+
+skip:
+	CMPBEQ R3, $0, finish
+
+b1: // 1 block remaining
+
+	// Load the final block (1-16 bytes). This will be placed into
+	// lane 0.
+	MOVD $-1(R3), R0
+	VLL  R0, (R2), T_0 // pad to 16 bytes with zeros
+
+	// The Poly1305 algorithm requires that a 1 bit be appended to
+	// each message block. If the final block is less than 16 bytes
+	// long then it is easiest to insert the 1 before the message
+	// block is split into 26-bit limbs. If, on the other hand, the
+	// final message block is 16 bytes long then we append the 1 bit
+	// after expansion as normal.
+	MOVBZ  $1, R0
+	CMPBEQ R3, $16, 2(PC)
+	VLVGB  R3, R0, T_0
+
+	// Set the message block in lane 1 to the value 0 so that it
+	// can be accumulated without affecting the final result.
+	VZERO T_1
+
+	// Split the final message block into 26-bit limbs in lane 0.
+	// Lane 1 will contain 0.
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// Append a 1 byte to the end of the last block only if it is a
+	// full 16 byte block.
+	CMPBNE R3, $16, 2(PC)
+	VLEIB  $4, $1, M_4
+
+	// We have previously saved r and 5r in the 32-bit even indexes
+	// of the R_[0-4] and R5_[1-4] coefficient registers.
+	//
+	// We want lane 0 to be multiplied by r so we need to move the
+	// saved r value into the 32-bit odd index in lane 0. We want
+	// lane 1 to be set to the value 1. This makes multiplication
+	// a no-op. We do this by setting lane 1 in every register to 0
+	// and then just setting the 32-bit index 3 in R_0 to 1.
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff --git a/event-exporter/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/event-exporter/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfdb..000000000 --- a/event-exporter/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/event-exporter/vendor/golang.org/x/net/context/go17.go b/event-exporter/vendor/golang.org/x/net/context/go17.go index 2cb9c408f..0c1b86793 100644 --- a/event-exporter/vendor/golang.org/x/net/context/go17.go +++ b/event-exporter/vendor/golang.org/x/net/context/go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.7 -// +build go1.7 package context diff --git a/event-exporter/vendor/golang.org/x/net/context/go19.go b/event-exporter/vendor/golang.org/x/net/context/go19.go index 64d31ecc3..e31e35a90 100644 --- a/event-exporter/vendor/golang.org/x/net/context/go19.go +++ b/event-exporter/vendor/golang.org/x/net/context/go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package context diff --git a/event-exporter/vendor/golang.org/x/net/context/pre_go17.go b/event-exporter/vendor/golang.org/x/net/context/pre_go17.go index 7b6b68511..065ff3dfa 100644 --- a/event-exporter/vendor/golang.org/x/net/context/pre_go17.go +++ b/event-exporter/vendor/golang.org/x/net/context/pre_go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 -// +build !go1.7 package context diff --git a/event-exporter/vendor/golang.org/x/net/context/pre_go19.go b/event-exporter/vendor/golang.org/x/net/context/pre_go19.go index 1f9715341..ec5a63803 100644 --- a/event-exporter/vendor/golang.org/x/net/context/pre_go19.go +++ b/event-exporter/vendor/golang.org/x/net/context/pre_go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.9 -// +build !go1.9 package context diff --git a/event-exporter/vendor/golang.org/x/net/http2/databuffer.go b/event-exporter/vendor/golang.org/x/net/http2/databuffer.go index a3067f8de..e6f55cbd1 100644 --- a/event-exporter/vendor/golang.org/x/net/http2/databuffer.go +++ b/event-exporter/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 
10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/event-exporter/vendor/golang.org/x/net/http2/frame.go b/event-exporter/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc..e2b298d85 100644 --- a/event-exporter/vendor/golang.org/x/net/http2/frame.go +++ b/event-exporter/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/event-exporter/vendor/golang.org/x/net/http2/go111.go b/event-exporter/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b032..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/go115.go b/event-exporter/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab9..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/go118.go b/event-exporter/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b31..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
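The databuffer rewrite above swaps a slice-valued pool plus a size-class search loop for typed pools of *[N]byte: storing a pointer in the sync.Pool avoids the allocation that boxing a []byte header in an interface{} costs on every Put, and the switch makes the size-class choice explicit. A sketch of the same pattern for a single size class, assuming Go 1.17+ for the slice-to-array-pointer conversion and imports of "fmt" and "sync" (pool4k, get4k, and put4k are illustrative names):

    var pool4k = sync.Pool{
        New: func() interface{} { return new([4 << 10]byte) },
    }

    // get4k hands out a 4 KiB chunk backed by a pooled array.
    func get4k() []byte { return pool4k.Get().(*[4 << 10]byte)[:] }

    // put4k recycles a chunk; converting back to *[4096]byte keeps Put allocation-free.
    func put4k(p []byte) {
        if len(p) != 4<<10 {
            panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
        }
        pool4k.Put((*[4 << 10]byte)(p))
    }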
- -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/not_go111.go b/event-exporter/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa819..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/not_go115.go b/event-exporter/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7a..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/not_go118.go b/event-exporter/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c96..000000000 --- a/event-exporter/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/event-exporter/vendor/golang.org/x/net/http2/server.go b/event-exporter/vendor/golang.org/x/net/http2/server.go index 02c88b6b3..ae94c6408 100644 --- a/event-exporter/vendor/golang.org/x/net/http2/server.go +++ b/event-exporter/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? 
handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. @@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/event-exporter/vendor/golang.org/x/net/http2/transport.go b/event-exporter/vendor/golang.org/x/net/http2/transport.go index 4515b22c4..df578b86c 100644 --- a/event-exporter/vendor/golang.org/x/net/http2/transport.go +++ b/event-exporter/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. 
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/event-exporter/vendor/golang.org/x/net/idna/go118.go b/event-exporter/vendor/golang.org/x/net/idna/go118.go index c5c4338db..712f1ad83 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/go118.go +++ b/event-exporter/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/idna10.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85fe..7b3717884 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/event-exporter/vendor/golang.org/x/net/idna/idna9.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698cef..cc6a892a4 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/event-exporter/vendor/golang.org/x/net/idna/pre_go118.go b/event-exporter/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1c..40e74bb3d 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/pre_go118.go +++ b/event-exporter/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables10.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef45..c6c2bf10a 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables11.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba71..76789393c 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables12.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bcc..0600cd2ae 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. 
DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables13.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701eadf..2fb768ef6 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables15.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778f..5ff05fe1a 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/tables9.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b5332..0f25e84ca 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/trie12.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904b..8a75b9667 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/event-exporter/vendor/golang.org/x/net/idna/trie13.0.0.go b/event-exporter/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc1..fa45bb907 100644 --- a/event-exporter/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/event-exporter/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/event-exporter/vendor/golang.org/x/oauth2/AUTHORS b/event-exporter/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/event-exporter/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/event-exporter/vendor/golang.org/x/oauth2/CONTRIBUTORS b/event-exporter/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/event-exporter/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/event-exporter/vendor/golang.org/x/oauth2/README.md b/event-exporter/vendor/golang.org/x/oauth2/README.md index 1473e1296..781770c20 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/README.md +++ b/event-exporter/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. 
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/event-exporter/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/event-exporter/vendor/golang.org/x/oauth2/authhandler/authhandler.go index 69967cf87..9bc6cd7bc 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/authhandler/authhandler.go +++ b/event-exporter/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -13,11 +13,36 @@ import ( "golang.org/x/oauth2" ) +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" +) + +// PKCEParams holds parameters to support PKCE. +type PKCEParams struct { + Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. + ChallengeMethod string // The encryption method (ex. S256). + Verifier string // The original, non-encrypted secret. +} + // AuthorizationHandler is a 3-legged-OAuth helper that prompts // the user for OAuth consent at the specified auth code URL // and returns an auth code and state upon approval. type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) +// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. +// +// The pkce parameter supports PKCE flow, which uses code challenge and code verifier +// to prevent CSRF attacks. A unique code challenge and code verifier should be generated +// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) +} + // TokenSource returns an oauth2.TokenSource that fetches access tokens // using 3-legged-OAuth flow. // @@ -33,7 +58,7 @@ type AuthorizationHandler func(authCodeURL string) (code string, state string, e // and response before exchanging the auth code for OAuth token to prevent CSRF // attacks. 
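TokenSourceWithPKCE above transports the challenge and verifier but leaves generating them to the caller. A sketch of producing an S256 pair per RFC 7636, assuming imports of "crypto/rand", "crypto/sha256", and "encoding/base64" (newPKCE is a hypothetical helper, not part of this package):

    // newPKCE returns a random verifier plus its S256 challenge,
    // the unpadded base64url encoding of SHA-256(verifier).
    func newPKCE() (authhandler.PKCEParams, error) {
        b := make([]byte, 32)
        if _, err := rand.Read(b); err != nil {
            return authhandler.PKCEParams{}, err
        }
        verifier := base64.RawURLEncoding.EncodeToString(b)
        sum := sha256.Sum256([]byte(verifier))
        return authhandler.PKCEParams{
            Challenge:       base64.RawURLEncoding.EncodeToString(sum[:]),
            ChallengeMethod: "S256",
            Verifier:        verifier,
        }, nil
    }

The result would then be passed as the pkce argument of TokenSourceWithPKCE.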
func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state}) + return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) } type authHandlerSource struct { @@ -41,10 +66,17 @@ type authHandlerSource struct { config *oauth2.Config authHandler AuthorizationHandler state string + pkce *PKCEParams } func (source authHandlerSource) Token() (*oauth2.Token, error) { - url := source.config.AuthCodeURL(source.state) + // Step 1: Obtain auth code. + var authCodeUrlOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { + authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), + oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} + } + url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) code, state, err := source.authHandler(url) if err != nil { return nil, err @@ -52,5 +84,11 @@ func (source authHandlerSource) Token() (*oauth2.Token, error) { if state != source.state { return nil, errors.New("state mismatch in 3-legged-OAuth flow") } - return source.config.Exchange(source.ctx, code) + + // Step 2: Exchange auth code for access token. + var exchangeOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Verifier != "" { + exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} + } + return source.config.Exchange(source.ctx, code, exchangeOptions...) } diff --git a/event-exporter/vendor/golang.org/x/oauth2/deviceauth.go b/event-exporter/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
+ VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. +func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. 
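DeviceAuth above requests the code pair, and DeviceAccessToken, defined next, polls until the user approves, honoring the RFC 8628 interval and slow_down backoff. A hedged end-to-end sketch, assuming cfg is an *oauth2.Config whose Endpoint sets DeviceAuthURL, plus "context", "fmt", and "log" imports:

    da, err := cfg.DeviceAuth(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // The user completes consent on a second device.
    fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

    tok, err := cfg.DeviceAccessToken(ctx, da) // blocks until approval, denial, or expiry
    if err != nil {
        log.Fatal(err)
    }
    client := cfg.Client(ctx, tok) // *http.Client that attaches the bearer token
    _ = client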
+func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b90..e61587945 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d2..9c79aa0a0 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/default.go b/event-exporter/vendor/golang.org/x/oauth2/google/default.go index 880dd7b59..02ccd08a7 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/default.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/default.go @@ -8,17 +8,23 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" "runtime" + "sync" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + universeDomainDefault = "googleapis.com" +) + // Credentials holds Google credentials, including "Application Default Credentials". 
// For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -35,6 +41,75 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". +// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. +// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return universeDomainDefault + } + return c.universeDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe. +// +// The default value is "googleapis.com". +// +// It obtains the universe domain from the attached service account on GCE when +// authenticating via the GCE metadata server. See also [The attached service +// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +// If the GCE metadata server returns a 404 error, the default value is +// returned. If the GCE metadata server returns an error other than 404, the +// error is returned. +func (c *Credentials) GetUniverseDomain() (string, error) { + c.udMu.Lock() + defer c.udMu.Unlock() + if c.universeDomain == "" && metadata.OnGCE() { + // If we're on Google Compute Engine, an App Engine standard second + // generation runtime, or App Engine flexible, use the metadata server. + err := c.computeUniverseDomain() + if err != nil { + return "", err + } + } + // If not on Google Compute Engine, or in case of any non-error path in + // computeUniverseDomain that did not set universeDomain, set the default + // universe domain. + if c.universeDomain == "" { + c.universeDomain = universeDomainDefault + } + return c.universeDomain, nil +} + +// computeUniverseDomain fetches the default service domain for a given Cloud +// universe from Google Compute Engine (GCE)'s metadata server. It's only valid +// to use this method if your program is running on a GCE instance. +func (c *Credentials) computeUniverseDomain() error { + var err error + c.universeDomain, err = metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + c.universeDomain = universeDomainDefault + return nil + } else { + return err + } + } + return nil } // DefaultCredentials is the old name of Credentials. @@ -54,11 +129,32 @@ type CredentialsParams struct { // Optional. Subject string - // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Optional. + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. AuthHandler authhandler.AuthorizationHandler - // State is a unique string used with AuthHandler. Optional. + // State is a unique string used with AuthHandler. Required for 3LO flow. State string + + // PKCE is used to support PKCE flow. Optional for 3LO flow. 
+ PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. + TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. + UniverseDomain string } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -94,20 +190,20 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // It looks for credentials in the following places, // preferring the first location found: // -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// For workload identity federation, refer to -// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on -// how to generate the JSON configuration file for on-prem/non-Google cloud -// platforms. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -124,17 +220,15 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // Second, try a well-known file. 
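All of the new CredentialsParams fields are optional for the metadata-server and service-account paths; only the 3LO flow requires AuthHandler and State. A typical ADC lookup with the new knobs, assuming the golang.org/x/oauth2/google import, a ctx in scope, and "log"/"time" (values are illustrative):

    params := google.CredentialsParams{
        Scopes:            []string{"https://www.googleapis.com/auth/cloud-platform"},
        EarlyTokenRefresh: 30 * time.Second, // only honored for GCE metadata tokens
    }
    creds, err := google.FindDefaultCredentialsWithParams(ctx, params)
    if err != nil {
        log.Fatal(err)
    }
    ud, err := creds.GetUniverseDomain() // "googleapis.com" unless overridden
    _, _ = ud, err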
filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, params); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) // and App Engine flexible use ComputeTokenSource and the metadata server. if appengineTokenFunc != nil { - return &DefaultCredentials{ + return &Credentials{ ProjectID: appengineAppIDFunc(ctx), TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil @@ -144,15 +238,15 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() - return &DefaultCredentials{ - ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + return &Credentials{ + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + universeDomain: params.UniverseDomain, }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. @@ -176,7 +270,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if config != nil { return &Credentials{ ProjectID: "", - TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), JSON: jsonData, }, nil } @@ -186,14 +280,26 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. 
+ if f.Type == userCredentialsKey { + universeDomain = universeDomainDefault + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ts = newErrWrappingTokenSource(ts) + return &Credentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } @@ -212,8 +318,8 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { - b, err := ioutil.ReadFile(filename) +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { + b, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/doc.go b/event-exporter/vendor/golang.org/x/oauth2/google/doc.go index 8e6a57ce9..03c42c6f8 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/doc.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/doc.go @@ -15,18 +15,18 @@ // For more information on using workload identity federation, refer to // https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // -// OAuth2 Configs +// # OAuth2 Configs // // Two functions in this package return golang.org/x/oauth2.Config values from Google credential // data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// Workload Identity Federation +// # Workload Identity Federation // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. @@ -36,23 +36,79 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // -// For OIDC providers, the library can retrieve OIDC tokens either from a -// local file location (file-sourced credentials) or from a local server -// (URL-sourced credentials). +// For OIDC and SAML providers, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). 
// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration // +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. // -// Credentials +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. +// +// Follow the detailed instructions on how to configure Workforce Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. 
+// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in +// +// # Security considerations +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Credentials // // The Credentials type represents Google credentials, including Application Default // Credentials. diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/error.go b/event-exporter/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000..d84dd0047 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. +type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/google.go b/event-exporter/vendor/golang.org/x/oauth2/google/google.go index ccc23ee0a..c66c53527 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/google.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/google.go @@ -16,16 +16,21 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. 
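Because CredentialsFromJSONWithParams now routes file-based token sources through newErrWrappingTokenSource, refresh failures surface as *AuthenticationError. A sketch of a retryability check, assuming ts came from such credentials and the "errors" import:

    tok, err := ts.Token()
    if err != nil {
        var authErr *google.AuthenticationError
        if errors.As(err, &authErr) && authErr.Temporary() {
            // Status 408, 429, 500, or 503: reasonable to retry with backoff.
        }
    }
    _ = tok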
var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://oauth2.googleapis.com/token" @@ -92,10 +97,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -103,12 +109,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
@@ -122,15 +129,23 @@ type credentialsFile struct { TokenURLExternal string `json:"token_url"` TokenInfoURL string `json:"token_info_url"` ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` Delegates []string `json:"delegates"` CredentialSource externalaccount.CredentialSource `json:"credential_source"` QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, @@ -139,6 +154,7 @@ func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config Scopes: scopes, TokenURL: f.TokenURL, Subject: subject, // This is the user email to impersonate + Audience: f.Audience, } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -166,7 +182,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar cfg.Endpoint.AuthURL = Endpoint.AuthURL } if cfg.Endpoint.TokenURL == "" { - cfg.Endpoint.TokenURL = Endpoint.TokenURL + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil @@ -177,12 +197,26 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar TokenURL: f.TokenURLExternal, TokenInfoURL: f.TokenInfoURL, ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, - ClientSecret: f.ClientSecret, - ClientID: f.ClientID, - CredentialSource: f.CredentialSource, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - WorkforcePoolUserProject: f.WorkforcePoolUserProject, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + } + return cfg.TokenSource(ctx) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, } return cfg.TokenSource(ctx) case impersonatedServiceAccount: @@ -217,7 +251,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) 
+} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index a5a5423c6..bd4efd19b 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -52,9 +52,23 @@ const ( // The AWS authorization header name for the security session token if available. awsSecurityTokenHeader = "x-amz-security-token" + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -241,6 +255,7 @@ type awsCredentialSource struct { RegionURL string RegionalCredVerificationURL string CredVerificationURL string + IMDSv2SessionTokenURL string TargetResource string requestSigner *awsRequestSigner region string @@ -266,14 +281,45 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + +func (cs awsCredentialSource) credentialSourceType() string { + return "aws" +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSecurityCredentials, err := cs.getSecurityCredentials() + headers := make(map[string]string) + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) if err != nil { return "", err } - if cs.region, err = cs.getRegion(); err != nil { + if cs.region, err = cs.getRegion(headers); err != nil { return "", err } @@ -340,12 +386,42 @@ func (cs awsCredentialSource) subjectToken() (string, error) { return url.QueryEscape(string(result)), nil } -func (cs *awsCredentialSource) getRegion() (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.IMDSv2SessionTokenURL == "" { + return "", nil } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + + req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) + } + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -357,6 +433,10 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err @@ -381,23 +461,21 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } - roleName, err := cs.getMetadataRoleName() + roleName, err := cs.getMetadataRoleName(headers) if err != nil { return } - 
credentials, err := cs.getMetadataSecurityCredentials(roleName) + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) if err != nil { return } @@ -413,7 +491,7 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede return credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { var result awsSecurityCredentials req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) @@ -422,6 +500,10 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( } req.Header.Add("Content-Type", "application/json") + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return result, err @@ -441,7 +523,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( return result, err } -func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { if cs.CredVerificationURL == "" { return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") } @@ -451,6 +533,10 @@ func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index bc3ce5317..33288d367 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -8,13 +8,12 @@ import ( "context" "fmt" "net/http" - "net/url" "regexp" "strconv" - "strings" "time" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" ) // now aliases time.Now for testing @@ -39,6 +38,9 @@ type Config struct { // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int // ClientSecret is currently only required if token_info endpoint also // needs to be called with the generated GCP access token. When provided, STS will be // called with additional basic authentication using client_id as username and client_secret as password. @@ -60,70 +62,23 @@ type Config struct { WorkforcePoolUserProject string } -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. - var ( - validTokenURLPatterns = []*regexp.Regexp{ - // The complicated part in the middle matches any number of characters that - // aren't period, spaces, or slashes. 
- regexp.MustCompile(`(?i)^[^\.\s\/\\]+\.sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), - } - validImpersonateURLPatterns = []*regexp.Regexp{ - regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), - } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - func validateWorkforceAudience(input string) bool { return validWorkforceAudiencePattern.MatchString(input) } // TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, validTokenURLPatterns, validImpersonateURLPatterns, "https") + return c.tokenSource(ctx, "https") } // tokenSource is a private function that's directly called by some of the tests, // because the unit test URLs are mocked, and would otherwise fail the // validity check. -func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Regexp, impersonateURLValidPats []*regexp.Regexp, scheme string) (oauth2.TokenSource, error) { - valid := validateURL(c.TokenURL, tokenURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid TokenURL provided while constructing tokenSource") - } - - if c.ServiceAccountImpersonationURL != "" { - valid := validateURL(c.ServiceAccountImpersonationURL, impersonateURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid ServiceAccountImpersonationURL provided while constructing tokenSource") - } - } - +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { if c.WorkforcePoolUserProject != "" { valid := validateWorkforceAudience(c.Audience) if !valid { @@ -141,10 +96,11 @@ func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Re scopes := c.Scopes ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, } return oauth2.ReuseTokenSource(nil, imp), nil } @@ -163,7 +119,7 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// Either the File or the URL field should be filled, depending on the kind of credential in question. +// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. 
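// Editor's note (illustrative, not part of the upstream change): for the AWS
// variant described above, a workload identity credential file would carry a
// credential source roughly like the JSON below; the URLs follow the usual
// generated configuration and are shown here only as an assumed example, with
// imdsv2_session_token_url being the new optional field:
//
//	{
//	  "environment_id": "aws1",
//	  "region_url": "http://169.254.169.254/latest/meta-data/placement/availability-zone",
//	  "url": "http://169.254.169.254/latest/meta-data/iam/security-credentials",
//	  "regional_cred_verification_url": "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
//	  "imdsv2_session_token_url": "http://169.254.169.254/latest/api/token"
//	}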
type CredentialSource struct { File string `json:"file"` @@ -171,38 +127,56 @@ type CredentialSource struct { URL string `json:"url"` Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable"` + EnvironmentID string `json:"environment_id"` RegionURL string `json:"region_url"` RegionalCredVerificationURL string `json:"regional_cred_verification_url"` CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` Format format `json:"format"` } -// parse determines the type of CredentialSource needed +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis *int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// parse determines the type of CredentialSource needed. func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { if awsVersion != 1 { return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) } - return awsCredentialSource{ + + awsCredSource := awsCredentialSource{ EnvironmentID: c.CredentialSource.EnvironmentID, RegionURL: c.CredentialSource.RegionURL, RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, CredVerificationURL: c.CredentialSource.URL, TargetResource: c.Audience, ctx: ctx, - }, nil + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil } } else if c.CredentialSource.File != "" { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) } return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } type baseCredentialSource interface { + credentialSourceType() string subjectToken() (string, error) } @@ -212,6 +186,15 @@ type tokenSource struct { conf *Config } +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + // Token allows tokenSource to conform to the oauth2.TokenSource interface. 
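+// Editor's note (illustrative): given the format string in getMetricsHeaderValue
+// above, the x-goog-api-client header attached to the STS request below would
+// look roughly like this (the Go version and boolean flags vary per config):
+//
+//	x-goog-api-client: gl-go/1.21.0 auth/unknown google-byoid-sdk source/aws sa-impersonation/true config-lifetime/false
+//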
func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf @@ -225,7 +208,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := stsTokenExchangeRequest{ + stsRequest := stsexchange.TokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -235,7 +218,8 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, @@ -248,7 +232,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { "userProject": conf.WorkforcePoolUserProject, } } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) if err != nil { return nil, err } diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go new file mode 100644 index 000000000..843d1c330 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -0,0 +1,313 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google: executable command failed with exit 
code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. 
+func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) +} + +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. 
Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go index e953ddb47..f35f73c5c 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -19,6 +19,10 @@ type fileCredentialSource struct { Format format } +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go new file mode 100644 index 000000000..1d5aad2e2 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. 
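+// Editor's note (illustrative): tracing the goVersion helper below against a
+// few runtime.Version() strings shows the normalization it performs:
+//
+//	"go1.21.0"          -> "1.21.0"
+//	"go1.21"            -> "1.21.0"  (padded to three components)
+//	"go1.21rc1"         -> "1.21.0-rc1"
+//	"devel +abc123 ..." -> "abc123"  (development builds keep the revision)
+//	anything else       -> "UNKNOWN"
+//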
+const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 8251fc85e..54c8f209f 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -48,12 +48,19 @@ type ImpersonateTokenSource struct { // Each service account must be granted roles/iam.serviceAccountTokenCreator // on the next service account in the chain. Optional. Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int } // Token performs the exchange to get a temporary service account token to allow access to GCP. 
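+// Editor's note (illustrative, not part of the upstream change): a sketch of
+// the new lifetime knob, assuming ctx and a base TokenSource named ts exist
+// and with a hypothetical service account email:
+//
+//	imp := ImpersonateTokenSource{
+//		Ctx:                  ctx,
+//		URL:                  "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/sa@example.iam.gserviceaccount.com:generateAccessToken",
+//		Scopes:               []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		Ts:                   ts,
+//		TokenLifetimeSeconds: 600, // requests "600s"; zero keeps the "3600s" default below
+//	}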
 func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) {
+	lifetimeString := "3600s"
+	if its.TokenLifetimeSeconds != 0 {
+		lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds)
+	}
 	reqBody := generateAccessTokenReq{
-		Lifetime:  "3600s",
+		Lifetime:  lifetimeString,
 		Scope:     its.Scopes,
 		Delegates: its.Delegates,
 	}
diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go
index 16dca6541..606bb4e80 100644
--- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go
+++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go
@@ -23,6 +23,10 @@ type urlCredentialSource struct {
 	ctx context.Context
 }
 
+func (cs urlCredentialSource) credentialSourceType() string {
+	return "url"
+}
+
 func (cs urlCredentialSource) subjectToken() (string, error) {
 	client := oauth2.NewClient(cs.ctx, nil)
 	req, err := http.NewRequest("GET", cs.URL, nil)
diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go
new file mode 100644
index 000000000..cb5820707
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package externalaccountauthorizeduser
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google/internal/stsexchange"
+)
+
+// now aliases time.Now for testing.
+var now = func() time.Time {
+	return time.Now().UTC()
+}
+
+var tokenValid = func(token oauth2.Token) bool {
+	return token.Valid()
+}
+
+type Config struct {
+	// Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and
+	// the provider identifier in that pool.
+	Audience string
+	// RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed.
+	RefreshToken string
+	// TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh; can be left
+	// empty if the token cannot be refreshed.
+	TokenURL string
+	// TokenInfoURL is the optional STS endpoint URL for token introspection.
+	TokenInfoURL string
+	// ClientID is only required in conjunction with ClientSecret, as described below.
+	ClientID string
+	// ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP
+	// access token. When provided, STS will be called with additional basic authentication using client_id as username
+	// and client_secret as password.
+	ClientSecret string
+	// Token is the OAuth 2.0 access token. Can be empty if refresh information is provided.
+	Token string
+	// Expiry is the optional expiration datetime of the OAuth 2.0 access token.
+	Expiry time.Time
+	// RevokeURL is the optional STS endpoint URL for revoking tokens.
+	RevokeURL string
+	// QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the
+	// project used to create the credentials.
+	QuotaProjectID string
+	Scopes         []string
+}
+
+func (c *Config) canRefresh() bool {
+	return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
+}
+
+func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) {
+	var token oauth2.Token
+	if c.Token != "" && !c.Expiry.IsZero() {
+		token = oauth2.Token{
+			AccessToken: c.Token,
+			Expiry:      c.Expiry,
+			TokenType:   "Bearer",
+		}
+	}
+	if !tokenValid(token) && !c.canRefresh() {
+		return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).")
+	}
+
+	ts := tokenSource{
+		ctx:  ctx,
+		conf: c,
+	}
+
+	return oauth2.ReuseTokenSource(&token, ts), nil
+}
+
+type tokenSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func (ts tokenSource) Token() (*oauth2.Token, error) {
+	conf := ts.conf
+	if !conf.canRefresh() {
+		return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.")
+	}
+
+	clientAuth := stsexchange.ClientAuthentication{
+		AuthStyle:    oauth2.AuthStyleInHeader,
+		ClientID:     conf.ClientID,
+		ClientSecret: conf.ClientSecret,
+	}
+
+	stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil)
+	if err != nil {
+		return nil, err
+	}
+	if stsResponse.ExpiresIn < 0 {
+		return nil, errors.New("oauth2/google: got invalid expiry from security token service")
+	}
+
+	if stsResponse.RefreshToken != "" {
+		conf.RefreshToken = stsResponse.RefreshToken
+	}
+
+	token := &oauth2.Token{
+		AccessToken: stsResponse.AccessToken,
+		Expiry:      now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
+		TokenType:   "Bearer",
+	}
+	return token, nil
+}
diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go
similarity index 88%
rename from event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go
rename to event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go
index 99987ce29..ebd520eac 100644
--- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go
+++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package externalaccount
+package stsexchange
 
 import (
 	"encoding/base64"
@@ -12,8 +12,8 @@ import (
 	"golang.org/x/oauth2"
 )
 
-// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1.
-type clientAuthentication struct {
+// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1.
+type ClientAuthentication struct {
 	// AuthStyle can be either basic or request-body
 	AuthStyle oauth2.AuthStyle
 	ClientID  string
@@ -23,7 +23,7 @@ type clientAuthentication struct {
 // InjectAuthentication is used to add authentication to a Secure Token Service exchange
 // request. It modifies either the passed url.Values or http.Header depending on the desired
 // authentication format.
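// Editor's note (illustrative): with AuthStyleInHeader, the method below adds
// standard HTTP Basic credentials, so a sketch like
//
//	auth := ClientAuthentication{AuthStyle: oauth2.AuthStyleInHeader, ClientID: "id", ClientSecret: "secret"}
//	auth.InjectAuthentication(data, headers)
//
// results in an Authorization: Basic base64("id:secret") header, while the
// request-body style instead appends client_id and client_secret to data.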
-func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5fc..1a0bebd15 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c 
> 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/event-exporter/vendor/golang.org/x/oauth2/google/jwt.go b/event-exporter/vendor/golang.org/x/oauth2/google/jwt.go index 67d97b990..e89e6ae17 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/google/jwt.go +++ b/event-exporter/vendor/golang.org/x/oauth2/google/jwt.go @@ -66,7 +66,8 @@ func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.Toke if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { diff --git a/event-exporter/vendor/golang.org/x/oauth2/internal/client_appengine.go b/event-exporter/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d9..d28140f78 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/event-exporter/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/event-exporter/vendor/golang.org/x/oauth2/internal/oauth2.go b/event-exporter/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf..14989beaf 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/event-exporter/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. 
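// Editor's note (illustrative): callers hand ParseKey either a PEM block or
// the bare DER bytes; a minimal sketch, with the key material elided:
//
//	key, err := ParseKey([]byte("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----"))
//	if err != nil {
//		// the key was encrypted, malformed, or not an RSA private key
//	}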
func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/event-exporter/vendor/golang.org/x/oauth2/internal/token.go b/event-exporter/vendor/golang.org/x/oauth2/internal/token.go index 355c38696..e83ddeef0 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/internal/token.go +++ b/event-exporter/vendor/golang.org/x/oauth2/internal/token.go @@ -18,9 +18,8 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -57,12 +56,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -111,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
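// Editor's note (illustrative, not part of the upstream change): after the
// refactor below, the probe result is remembered per token URL on a per-Config
// cache rather than in a process-wide map, roughly along these lines, where
// probeAuthStyle is a hypothetical stand-in for the real probing logic:
//
//	var cache AuthStyleCache
//	style, ok := cache.lookupAuthStyle(tokenURL)
//	if !ok {
//		style = probeAuthStyle() // hypothetical: try header auth, fall back to params
//		cache.setAuthStyle(tokenURL, style)
//	}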
-func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -185,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -218,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
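// Editor's note (illustrative, not part of the upstream change): the hunks
// below make doTokenRoundTrip parse RFC 6749 error payloads even when the
// server responds 200, so a body such as
//
//	{"error": "invalid_grant", "error_description": "refresh token expired"}
//
// now surfaces as a *RetrieveError whose Error() reads roughly:
//
//	oauth2: "invalid_grant" "refresh token expired"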
@@ -229,7 +253,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } @@ -238,21 +262,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -267,8 +299,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -278,17 +316,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/event-exporter/vendor/golang.org/x/oauth2/jws/jws.go b/event-exporter/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271..95015648b 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/jws/jws.go +++ b/event-exporter/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, 
h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/event-exporter/vendor/golang.org/x/oauth2/oauth2.go b/event-exporter/vendor/golang.org/x/oauth2/oauth2.go index 291df5c83..90a2c3d6d 100644 --- a/event-exporter/vendor/golang.org/x/oauth2/oauth2.go +++ b/event-exporter/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -57,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -70,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -138,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -161,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -206,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. 
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+// If using PKCE to protect against CSRF attacks, opts should include a
+// VerifierOption.
 func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
 	v := url.Values{
 		"grant_type": {"authorization_code"},
@@ -290,6 +300,8 @@ type reuseTokenSource struct {
 	mu sync.Mutex // guards t
 	t  *Token
+
+	expiryDelta time.Duration
 }
 
 // Token returns the current token if it's still valid, else will
@@ -305,6 +317,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
 	if err != nil {
 		return nil, err
 	}
+	t.expiryDelta = s.expiryDelta
 	s.t = t
 	return t, nil
 }
@@ -379,3 +392,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
 		new: src,
 	}
 }
+
+// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same
+// manner as the TokenSource returned by ReuseTokenSource, except the expiry
+// buffer is configurable. The expiration time of a token is calculated as
+// t.Expiry.Add(-earlyExpiry).
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly, but set the expiryDelta to earlyExpiry,
+			// so the behavior matches what the user expects.
+			rt.expiryDelta = earlyExpiry
+			return rt
+		}
+		src = rt.new
+	}
+	if t != nil {
+		t.expiryDelta = earlyExpiry
+	}
+	return &reuseTokenSource{
+		t:           t,
+		new:         src,
+		expiryDelta: earlyExpiry,
+	}
+}
diff --git a/event-exporter/vendor/golang.org/x/oauth2/pkce.go b/event-exporter/vendor/golang.org/x/oauth2/pkce.go
new file mode 100644
index 000000000..50593b6df
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/oauth2/pkce.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"net/url"
+)
+
+const (
+	codeChallengeKey       = "code_challenge"
+	codeChallengeMethodKey = "code_challenge_method"
+	codeVerifierKey        = "code_verifier"
+)
+
+// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness.
+// This follows recommendations in RFC 7636.
+//
+// A fresh verifier should be generated for each authorization.
+// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAccessToken).
+func GenerateVerifier() string {
+	// "RECOMMENDED that the output of a suitable random number generator be
+	// used to create a 32-octet sequence. The octet sequence is then
+	// base64url-encoded to produce a 43-octet URL-safe string to use as the
+	// code verifier."
+	// https://datatracker.ietf.org/doc/html/rfc7636#section-4.1
+	data := make([]byte, 32)
+	if _, err := rand.Read(data); err != nil {
+		panic(err)
+	}
+	return base64.RawURLEncoding.EncodeToString(data)
+}
+
+// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
+// passed to Config.Exchange or Config.DeviceAccessToken only.
+func VerifierOption(verifier string) AuthCodeOption {
+	return setParam{k: codeVerifierKey, v: verifier}
+}
+
+// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
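+// Editor's note (illustrative, not part of the upstream change): the intended
+// end-to-end PKCE flow with the helpers in this new file, assuming a configured
+// oauth2.Config named conf and a ctx:
+//
+//	verifier := oauth2.GenerateVerifier()
+//	authURL := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
+//	// ...redirect the user, then receive code on the callback...
+//	tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))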
+//
+// Prefer to use S256ChallengeOption where possible.
+func S256ChallengeFromVerifier(verifier string) string {
+	sha := sha256.Sum256([]byte(verifier))
+	return base64.RawURLEncoding.EncodeToString(sha[:])
+}
+
+// S256ChallengeOption returns a PKCE code challenge derived from verifier with
+// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
+// only.
+func S256ChallengeOption(verifier string) AuthCodeOption {
+	return challengeOption{
+		challenge_method: "S256",
+		challenge:        S256ChallengeFromVerifier(verifier),
+	}
+}
+
+type challengeOption struct{ challenge_method, challenge string }
+
+func (p challengeOption) setValue(m url.Values) {
+	m.Set(codeChallengeMethodKey, p.challenge_method)
+	m.Set(codeChallengeKey, p.challenge)
+}
diff --git a/event-exporter/vendor/golang.org/x/oauth2/token.go b/event-exporter/vendor/golang.org/x/oauth2/token.go
index 822720341..5bbb33217 100644
--- a/event-exporter/vendor/golang.org/x/oauth2/token.go
+++ b/event-exporter/vendor/golang.org/x/oauth2/token.go
@@ -16,10 +16,10 @@ import (
 	"golang.org/x/oauth2/internal"
 )
 
-// expiryDelta determines how earlier a token should be considered
+// defaultExpiryDelta determines how much earlier a token should be considered
 // expired than its actual expiration time. It is used to avoid late
 // expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
+const defaultExpiryDelta = 10 * time.Second
 
 // Token represents the credentials used to authorize
 // the requests to access protected resources on the OAuth 2.0
@@ -52,6 +52,11 @@ type Token struct {
 	// raw optionally contains extra metadata from the server
 	// when updating a token.
 	raw interface{}
+
+	// expiryDelta is used to calculate when a token is considered
+	// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
+	// is used.
+	expiryDelta time.Duration
 }
 
 // Type returns t.TokenType if non-empty, else "Bearer".
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
 	if t.Expiry.IsZero() {
 		return false
 	}
+
+	expiryDelta := defaultExpiryDelta
+	if t.expiryDelta != 0 {
+		expiryDelta = t.expiryDelta
+	}
 	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
 }
 
@@ -154,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token {
 // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
 // with an error.
 func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
-	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
 	if err != nil {
 		if rErr, ok := err.(*internal.RetrieveError); ok {
 			return nil, (*RetrieveError)(rErr)
@@ -165,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error)
 }
 
 // RetrieveError is the error returned when the token endpoint returns a
-// non-2XX HTTP status code.
+// non-2XX HTTP status code or populates RFC 6749's 'error' parameter.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
 type RetrieveError struct {
 	Response *http.Response
 	// Body is the body that was consumed by reading Response.Body.
 	// It may be truncated.
 	Body []byte
+	// ErrorCode is RFC 6749's 'error' parameter.
+	ErrorCode string
+	// ErrorDescription is RFC 6749's 'error_description' parameter.
+ ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/event-exporter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..269e173ca --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/byteorder.go b/event-exporter/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..271055be0 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. 
+func hostByteOrder() byteOrder {
+	switch runtime.GOARCH {
+	case "386", "amd64", "amd64p32",
+		"alpha",
+		"arm", "arm64",
+		"loong64",
+		"mipsle", "mips64le", "mips64p32le",
+		"nios2",
+		"ppc64le",
+		"riscv", "riscv64",
+		"sh":
+		return littleEndian{}
+	case "armbe", "arm64be",
+		"m68k",
+		"mips", "mips64", "mips64p32",
+		"ppc", "ppc64",
+		"s390", "s390x",
+		"shbe",
+		"sparc", "sparc64":
+		return bigEndian{}
+	}
+	panic("unknown architecture")
+}
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 000000000..4756ad5f7
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,290 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+import (
+	"os"
+	"strings"
+)
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating system specific file, e.g. /proc/self/auxv on linux/arm.
+// Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further, HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
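Downstream code typically gates optimized paths on these flags; a minimal sketch:

	// Select an implementation based on detected CPU features.
	if cpu.Initialized && cpu.X86.HasAVX2 {
		// use the AVX2-optimized kernel
	} else {
		// fall back to the portable implementation
	}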
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. 
+var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. +var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. 
+// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_aix.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 000000000..9bf0c32eb --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..301b752e9 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..f3eb993bf --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
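CacheLinePad, sized by per-architecture constants such as the one below, is intended for layouts like this sketch, which keeps independently updated fields off a shared cache line:

	// Counters updated by different goroutines; the padding prevents
	// false sharing between them.
	type counters struct {
		_      cpu.CacheLinePad
		hits   uint64
		_      cpu.CacheLinePad
		misses uint64
	}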
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.s 
b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..fcb9a3888 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..a8acd3e32 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..c8ae6ddc1 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..910728fb1 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
+func xgetbv() (eax, edx uint32)
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
new file mode 100644
index 000000000..7f1946780
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+
+package cpu
+
+func getisar0() uint64 { return 0 }
+func getisar1() uint64 { return 0 }
+func getpfr0() uint64 { return 0 }
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 000000000..9526d2ce3
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList { panic("not implemented for gccgo") }
+func kmQuery() queryResult { panic("not implemented for gccgo") }
+func kmcQuery() queryResult { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult { panic("not implemented for gccgo") }
+func kimdQuery() queryResult { panic("not implemented for gccgo") }
+func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
new file mode 100644
index 000000000..3f73a05dc
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+#include <x86intrin.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC push_options
+#pragma GCC target("xsave")
+#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 000000000..99c60fe9f --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..743eb5435 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..a968b80fa --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,111 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. 
+ // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..4686c1d54 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..cd63e7335 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x + +package cpu + +func doinit() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..197188e67 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. 
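The hwCap/hwCap2 values consumed by the doinit functions here are filled by readHWCAP (defined elsewhere in this package) from the ELF auxiliary vector. An illustrative sketch of that mechanism, not the vendored implementation (AT_HWCAP=16 and AT_HWCAP2=26 on Linux):

	// /proc/self/auxv is a sequence of native-endian (tag, value) uintptr pairs.
	buf, err := os.ReadFile("/proc/self/auxv")
	if err == nil {
		pb := int(unsafe.Sizeof(uintptr(0)))
		for i := 0; i+2*pb <= len(buf); i += 2 * pb {
			tag := *(*uintptr)(unsafe.Pointer(&buf[i]))
			val := *(*uintptr)(unsafe.Pointer(&buf[i+pb]))
			switch tag {
			case 16: // AT_HWCAP
				hwCap = uint(val)
			case 26: // AT_HWCAP2
				hwCap2 = uint(val)
			}
		}
	}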
+const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000..558635850 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..fedb00cc4 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..ffb4ec7eb --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+	for partno, part := range parts {
+		nodes, err := sysctlNodes(mib)
+		if err != nil {
+			return nil, err
+		}
+		for _, node := range nodes {
+			n := make([]byte, 0)
+			for i := range node.Name {
+				if node.Name[i] != 0 {
+					n = append(n, byte(node.Name[i]))
+				}
+			}
+			if string(n) == part {
+				mib = append(mib, int32(node.Num))
+				break
+			}
+		}
+		if len(mib) != partno+1 {
+			return nil, err
+		}
+	}
+
+	return mib, nil
+}
+
+// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
+type aarch64SysctlCPUID struct {
+	midr      uint64 /* Main ID Register */
+	revidr    uint64 /* Revision ID Register */
+	mpidr     uint64 /* Multiprocessor Affinity Register */
+	aa64dfr0  uint64 /* A64 Debug Feature Register 0 */
+	aa64dfr1  uint64 /* A64 Debug Feature Register 1 */
+	aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
+	aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
+	aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
+	aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
+	aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
+	aa64pfr0  uint64 /* A64 Processor Feature Register 0 */
+	aa64pfr1  uint64 /* A64 Processor Feature Register 1 */
+	aa64zfr0  uint64 /* A64 SVE Feature ID Register 0 */
+	mvfr0     uint32 /* Media and VFP Feature Register 0 */
+	mvfr1     uint32 /* Media and VFP Feature Register 1 */
+	mvfr2     uint32 /* Media and VFP Feature Register 2 */
+	pad       uint32
+	clidr     uint64 /* Cache Level ID Register */
+	ctr       uint64 /* Cache Type Register */
+}
+
+func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
+	mib, err := nametomib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := aarch64SysctlCPUID{}
+	n := unsafe.Sizeof(out)
+	_, _, errno := syscall.Syscall6(
+		syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&out)),
+		uintptr(unsafe.Pointer(&n)),
+		uintptr(0),
+		uintptr(0))
+	if errno != 0 {
+		return nil, errno
+	}
+	return &out, nil
+}
+
+func doinit() {
+	cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
+	if err != nil {
+		setMinimalFeatures()
+		return
+	}
+	parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
+
+	Initialized = true
+}
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 000000000..85b64d5cc
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+	// From OpenBSD's sys/sysctl.h.
+	_CTL_MACHDEP = 7
+
+	// From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000..054ba05d6 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 000000000..e9ecf2a45 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 000000000..5341e7f88 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 000000000..5f8f2419a --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000..89608fba2 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000..5ab87808f --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..c14f12b14 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..7f0c79c00 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. 
+func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..1fb4b7013 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
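+
+// The query routines below fill the facilityList and queryResult structs
+// declared in cpu_s390x.go, which number bits big endian: index 0 is the
+// leftmost bit of the first doubleword. As a worked example (illustrative,
+// not from the upstream source), facility zarch = 1 is tested as
+// bits[0] & ((1<<63)>>1), i.e. against mask 0x4000000000000000.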
+ +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..384787ea3 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/cpu_x86.go b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..c29f5e4c5 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
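+
+// archInit below derives every feature flag from CPUID leaves via the
+// cpuid and xgetbv helpers. A worked example of the bit test it relies on,
+// using a hypothetical register value for illustration:
+//
+//	edx1 := uint32(1 << 26) // CPUID.1:EDX with only the SSE2 bit set
+//	_ = isSet(26, edx1)     // true, so archInit would set X86.HasSSE2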
+
+//go:build 386 || amd64 || amd64p32
+
+package cpu
+
+import "runtime"
+
+const cacheLineSize = 64
+
+func initOptions() {
+	options = []option{
+		{Name: "adx", Feature: &X86.HasADX},
+		{Name: "aes", Feature: &X86.HasAES},
+		{Name: "avx", Feature: &X86.HasAVX},
+		{Name: "avx2", Feature: &X86.HasAVX2},
+		{Name: "avx512", Feature: &X86.HasAVX512},
+		{Name: "avx512f", Feature: &X86.HasAVX512F},
+		{Name: "avx512cd", Feature: &X86.HasAVX512CD},
+		{Name: "avx512er", Feature: &X86.HasAVX512ER},
+		{Name: "avx512pf", Feature: &X86.HasAVX512PF},
+		{Name: "avx512vl", Feature: &X86.HasAVX512VL},
+		{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+		{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
+		{Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
+		{Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
+		{Name: "avx5124vnniw", Feature: &X86.HasAVX5124VNNIW},
+		{Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
+		{Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
+		{Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
+		{Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
+		{Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
+		{Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
+		{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
+		{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
+		{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
+		{Name: "amxtile", Feature: &X86.HasAMXTile},
+		{Name: "amxint8", Feature: &X86.HasAMXInt8},
+		{Name: "amxbf16", Feature: &X86.HasAMXBF16},
+		{Name: "bmi1", Feature: &X86.HasBMI1},
+		{Name: "bmi2", Feature: &X86.HasBMI2},
+		{Name: "cx16", Feature: &X86.HasCX16},
+		{Name: "erms", Feature: &X86.HasERMS},
+		{Name: "fma", Feature: &X86.HasFMA},
+		{Name: "osxsave", Feature: &X86.HasOSXSAVE},
+		{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+		{Name: "popcnt", Feature: &X86.HasPOPCNT},
+		{Name: "rdrand", Feature: &X86.HasRDRAND},
+		{Name: "rdseed", Feature: &X86.HasRDSEED},
+		{Name: "sse3", Feature: &X86.HasSSE3},
+		{Name: "sse41", Feature: &X86.HasSSE41},
+		{Name: "sse42", Feature: &X86.HasSSE42},
+		{Name: "ssse3", Feature: &X86.HasSSSE3},
+
+		// These capabilities should always be enabled on amd64:
+		{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
+	}
+}
+
+func archInit() {
+
+	Initialized = true
+
+	maxID, _, _, _ := cpuid(0, 0)
+
+	if maxID < 1 {
+		return
+	}
+
+	_, _, ecx1, edx1 := cpuid(1, 0)
+	X86.HasSSE2 = isSet(26, edx1)
+
+	X86.HasSSE3 = isSet(0, ecx1)
+	X86.HasPCLMULQDQ = isSet(1, ecx1)
+	X86.HasSSSE3 = isSet(9, ecx1)
+	X86.HasFMA = isSet(12, ecx1)
+	X86.HasCX16 = isSet(13, ecx1)
+	X86.HasSSE41 = isSet(19, ecx1)
+	X86.HasSSE42 = isSet(20, ecx1)
+	X86.HasPOPCNT = isSet(23, ecx1)
+	X86.HasAES = isSet(25, ecx1)
+	X86.HasOSXSAVE = isSet(27, ecx1)
+	X86.HasRDRAND = isSet(30, ecx1)
+
+	var osSupportsAVX, osSupportsAVX512 bool
+	// For XGETBV, OSXSAVE bit is required and sufficient.
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+
+		if runtime.GOOS == "darwin" {
+			// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
+			// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
+			// See issue 49233.
+			osSupportsAVX512 = false
+		} else {
+			// Check if OPMASK and ZMM registers have OS support.
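+			// Per the documented XCR0 layout, bit 5 enables the opmask
+			// registers, bit 6 the upper 256 bits of ZMM0-ZMM15, and
+			// bit 7 ZMM16-ZMM31; AVX-512 needs all three from the OS.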
+			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+		}
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, ecx7, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+	if X86.HasAVX512 {
+		X86.HasAVX512F = true
+		X86.HasAVX512CD = isSet(28, ebx7)
+		X86.HasAVX512ER = isSet(27, ebx7)
+		X86.HasAVX512PF = isSet(26, ebx7)
+		X86.HasAVX512VL = isSet(31, ebx7)
+		X86.HasAVX512BW = isSet(30, ebx7)
+		X86.HasAVX512DQ = isSet(17, ebx7)
+		X86.HasAVX512IFMA = isSet(21, ebx7)
+		X86.HasAVX512VBMI = isSet(1, ecx7)
+		X86.HasAVX5124VNNIW = isSet(2, edx7)
+		X86.HasAVX5124FMAPS = isSet(3, edx7)
+		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+		X86.HasAVX512VNNI = isSet(11, ecx7)
+		X86.HasAVX512GFNI = isSet(8, ecx7)
+		X86.HasAVX512VAES = isSet(9, ecx7)
+		X86.HasAVX512VBMI2 = isSet(6, ecx7)
+		X86.HasAVX512BITALG = isSet(12, ecx7)
+
+		eax71, _, _, _ := cpuid(7, 1)
+		X86.HasAVX512BF16 = isSet(5, eax71)
+	}
+
+	X86.HasAMXTile = isSet(24, edx7)
+	X86.HasAMXInt8 = isSet(25, edx7)
+	X86.HasAMXBF16 = isSet(22, edx7)
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/event-exporter/vendor/golang.org/x/sys/cpu/hwcap_linux.go
new file mode 100644
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"os"
+)
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+	// For Go 1.21+, get auxv from the Go runtime.
+	if a := getAuxv(); len(a) > 0 {
+		for len(a) >= 2 {
+			tag, val := a[0], uint(a[1])
+			a = a[2:]
+			switch tag {
+			case _AT_HWCAP:
+				hwCap = val
+			case _AT_HWCAP2:
+				hwCap2 = val
+			}
+		}
+		return nil
+	}
+
+	buf, err := os.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on android /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false. On some
+		// architectures (e.g. arm64) doinit() implements a fallback
+		// readout and will set Initialized = true again.
+		return err
+	}
+	bo := hostByteOrder()
+	for len(buf) >= 2*(uintSize/8) {
+		var tag, val uint
+		switch uintSize {
+		case 32:
+			tag = uint(bo.Uint32(buf[0:]))
+			val = uint(bo.Uint32(buf[4:]))
+			buf = buf[8:]
+		case 64:
+			tag = uint(bo.Uint64(buf[0:]))
+			val = uint(bo.Uint64(buf[8:]))
+			buf = buf[16:]
+		}
+		switch tag {
+		case _AT_HWCAP:
+			hwCap = val
+		case _AT_HWCAP2:
+			hwCap2 = val
+		}
+	}
+	return nil
+}
diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/parse.go b/event-exporter/vendor/golang.org/x/sys/cpu/parse.go
new file mode 100644
index 000000000..762b63d68
--- /dev/null
+++ b/event-exporter/vendor/golang.org/x/sys/cpu/parse.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "strconv"
+
+// parseRelease parses a dot-separated version number. It follows the semver
+// syntax, but allows the minor and patch versions to be elided.
+//
+// This is a copy of the Go runtime's parseRelease from
+// https://golang.org/cl/209597.
+func parseRelease(rel string) (major, minor, patch int, ok bool) {
+	// Strip anything after a dash or plus.
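+	// For example, a release string such as "6.4-current" is reduced to
+	// "6.4" here, which the code below parses as (6, 4, 0, true).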
+ for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/event-exporter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000..4cd64c704 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 000000000..5f92ac9a2 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 000000000..4c9788ea8 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
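+
+// This file wires getAuxvFn, declared in runtime_auxv.go, to the
+// unexported runtime.getAuxv via go:linkname. A sketch of consuming the
+// returned tag/value pairs, mirroring the loop already shown in readHWCAP
+// (illustrative only):
+//
+//	for a := getAuxv(); len(a) >= 2; a = a[2:] {
+//		tag, val := a[0], uint(a[1])
+//		_, _ = tag, val
+//	}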
+ +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..1b9ccb091 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..e8b6cdbe9 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/event-exporter/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index c9b69937a..73687de74 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.5 -// +build go1.5 package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/event-exporter/vendor/golang.org/x/sys/plan9/pwd_plan9.go index 98bf56b73..fb9458218 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.5 -// +build !go1.5 package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/race.go b/event-exporter/vendor/golang.org/x/sys/plan9/race.go index 62377d2ff..c02d9ed33 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/race.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && race -// +build plan9,race package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/race0.go b/event-exporter/vendor/golang.org/x/sys/plan9/race0.go index f8da30876..7b15e15f6 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/race0.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && !race -// +build plan9,!race package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/str.go b/event-exporter/vendor/golang.org/x/sys/plan9/str.go index 55fa8d025..ba3e8ff8a 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/str.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/syscall.go b/event-exporter/vendor/golang.org/x/sys/plan9/syscall.go index 67e5b0115..d631fd664 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/syscall.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 // Package plan9 contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 3f40b9bd7..f780d5c80 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && 386 -// +build plan9,386 package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 0e6a96aa4..7de61065f 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && amd64 -// +build plan9,amd64 package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 244c501b7..ea85780f0 100644 --- a/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && arm -// +build plan9,arm package plan9 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/aliases.go b/event-exporter/vendor/golang.org/x/sys/unix/aliases.go index abc89c104..e7d3df4bd 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/aliases.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
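 // This run of hunks makes one mechanical change: with the module now
 // requiring a toolchain that understands //go:build (Go 1.17+), the legacy
 // // +build mirror line is redundant and is dropped. For example, the pair
 //
 //	//go:build aix && ppc64
 //	// +build aix,ppc64
 //
 // collapses to the //go:build form alone; gofmt kept the two in sync while
 // both were present.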
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2e..269e173ca 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3d..a4fcef0e0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349a..1e63615c5 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4adc..6496c3100 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a7391..4fd1f54da 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a8489..42f7eb9e4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019ea..f8902667e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_386.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d07..3b4734870 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43c..67e29f317 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d5140..d6ae269ce 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae02760..01e5e253c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 565357288..2abf12f6e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2ce..f84bae712 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c6..f08f62807 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d498934..bdfc024d2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d5..2e8c99612 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab339..2c394b11e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169c..fab586a2c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c1..f949ec547 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/event-exporter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e1858..2f67ba86d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/cap_freebsd.go b/event-exporter/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb8..a08657890 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build freebsd -// +build freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/constants.go b/event-exporter/vendor/golang.org/x/sys/unix/constants.go index 394a3965b..6fb7cb77d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/constants.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a998508..d78513461 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0a..623a5e697 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/dev_zos.go b/event-exporter/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a0..bb6a64fe9 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/dirent.go b/event-exporter/vendor/golang.org/x/sys/unix/dirent.go index 2499f977b..1ebf11782 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/dirent.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/endian_big.go b/event-exporter/vendor/golang.org/x/sys/unix/endian_big.go index a52026557..1095fd31d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/endian_big.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
// //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/endian_little.go b/event-exporter/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4ae..b9f0e277b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/endian_little.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/env_unix.go b/event-exporter/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d13..a96da71f4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/env_unix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/epoll_zos.go b/event-exporter/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e02..7753fddea 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/fcntl.go b/event-exporter/vendor/golang.org/x/sys/unix/fcntl.go index e9b991258..6200876fb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/fcntl.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/event-exporter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808b..13b4acd5c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/fdset.go b/event-exporter/vendor/golang.org/x/sys/unix/fdset.go index a8068f94f..9e83d18cd 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/fdset.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package unix
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/event-exporter/vendor/golang.org/x/sys/unix/fstatfs_zos.go
index e377cc9f4..c8bde601e 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/fstatfs_zos.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/fstatfs_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/gccgo.go b/event-exporter/vendor/golang.org/x/sys/unix/gccgo.go
index b06f52d74..aca5721dd 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/gccgo.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
 
 package unix
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/gccgo_c.c b/event-exporter/vendor/golang.org/x/sys/unix/gccgo_c.c
index f98a1c542..d468b7b47 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/event-exporter/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
 
 #include <errno.h>
 #include <stdint.h>
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
index e60e49a3d..972d61bd7 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && linux && amd64
-// +build gccgo,linux,amd64
 
 package unix
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ifreq_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 15721a510..848840ae4 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux
-// +build linux
 
 package unix
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_linux.go
index 0d12c0851..dbe680eab 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
 func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
 	return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
 }
+
+// IoctlLoopConfigure configures all loop device parameters in a single step
+func IoctlLoopConfigure(fd int, value *LoopConfig) error {
+	return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
+}
diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_signed.go b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_signed.go
index 7def9580e..5b0759bd8 100644
--- a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_signed.go
+++ b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_signed.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
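 // IoctlLoopConfigure above wraps LOOP_CONFIGURE (Linux 5.8+), which merges
 // the older LOOP_SET_FD plus LOOP_SET_STATUS64 two-step into one ioctl. A
 // hedged usage sketch, where the LoopConfig field name is assumed from the
 // kernel's struct loop_config rather than quoted from this patch:
 //
 //	cfg := unix.LoopConfig{Fd: uint32(backingFd)}
 //	if err := unix.IoctlLoopConfigure(loopFd, &cfg); err != nil {
 //		// fall back to LOOP_SET_FD on kernels without LOOP_CONFIGURE
 //	}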
//go:build aix || solaris -// +build aix solaris package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1e..20f470b9d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_zos.go b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf76..c8b2a750f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/mkerrors.sh b/event-exporter/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7eb..fdcaa974d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/event-exporter/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
@@ -519,6 +521,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +563,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -581,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || @@ -602,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || @@ -663,7 +669,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/event-exporter/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/event-exporter/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca0513632..4b68e5978 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/mremap.go b/event-exporter/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa9..fd45fe529 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/mremap.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || netbsd -// +build linux netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/pagesize_unix.go b/event-exporter/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5b..4d0a3430e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/event-exporter/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b2..6a09af53e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. 
// -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. 
- if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/event-exporter/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7..3f0975f3d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ptrace_ios.go b/event-exporter/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a01..a4d35db5d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/race.go b/event-exporter/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec5..714d2aae7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/race.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/race0.go b/event-exporter/vendor/golang.org/x/sys/unix/race0.go index 706e1322a..4a9f6634c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/race0.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d6257569..dbd2b6ccb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c4..130398b6b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
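 // After this rewrite all three entry points share pledgeAvailable (OpenBSD
 // 6.4 or newer) and the common pledge wrapper, so a caller simply picks
 // which half to restrict. An illustrative call, with promise strings taken
 // from pledge(2):
 //
 //	// Permit only stdio and read-only filesystem access from here on.
 //	if err := unix.PledgePromises("stdio rpath"); err != nil {
 //		log.Fatal(err)
 //	}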
//go:build darwin -// +build darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f6..c3a62dbb1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a5..4a1eab37e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall.go index 63e8c8383..5ea74da98 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cdac..67ce6cef2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa95..1fdaa4760 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0f..c87f9a9f4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
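 // The syscall_aix.go hunk above follows the Linux-style abstract-socket
 // convention: a SockaddrUnix name starting with '@' (or an explicit leading
 // NUL) is rewritten to a leading NUL and the trailing NUL is not counted;
 // the new sl > 3 guard keeps a bare unnamed socket from being misread as
 // abstract. Illustrative value (the name is an example only):
 //
 //	sa := &unix.SockaddrUnix{Name: "@demo"} // the kernel sees "\x00demo"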
//go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_bsd.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de518..a00c3e545 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -317,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9..0eaecf5fc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec9963..f36c6707c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641f..16dc69937 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d32120..14bab6b2d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da51004..3967bca77 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c483..eff19ada2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093f..4f24b517a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0e..ac30759ec 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b37..aab725ca7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd4673..ba46651f8 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e4..df89f9e6b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_illumos.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a8..a863f7052 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. 
//go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e50224..0f85e29e6 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. + if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1301,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -2482,3 +2491,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945ea..506dafa7b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6a..38d55641b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
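The syscall_linux.go hunk above changes Fchmodat from rejecting all flags outright to first attempting the newer fchmodat2(2) syscall, and it falls back to the old EINVAL/EOPNOTSUPP classification only when the kernel answers ENOSYS. A sketch of what a caller sees, assuming a symlink at a hypothetical path:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// On kernels with fchmodat2 (Linux 6.6+) the flag is honored; on
    	// older kernels this still returns EOPNOTSUPP, exactly as before.
    	err := unix.Fchmodat(unix.AT_FDCWD, "/tmp/some-symlink", 0o600,
    		unix.AT_SYMLINK_NOFOLLOW)
    	switch err {
    	case nil:
    		fmt.Println("mode set without following the symlink")
    	case unix.EOPNOTSUPP:
    		fmt.Println("kernel lacks fchmodat2; flag unsupported")
    	default:
    		fmt.Println("fchmodat failed:", err)
    	}
    }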
//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce36..d557cf8de 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3aa..facdb83b2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da2986415..cd2dd797f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689a..cf2ee6c75 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7d..ffc4c2b63 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb489..9ebfdcf44 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fccd..5f2b57c4c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
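The same syscall_linux.go hunk also ends by adding a generated wrapper for the new cachestat(2) syscall (the `//sys Cachestat` line). A sketch of querying page-cache residency for a file, assuming the CachestatRange/Cachestat_t layouts shipped in this release and a kernel new enough to implement the call (Linux 6.5+; older kernels return ENOSYS):

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Open("/tmp/some-file") // hypothetical path
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	defer f.Close()

    	crange := unix.CachestatRange{Off: 0, Len: 0} // Len 0 = through EOF
    	var cstat unix.Cachestat_t
    	if err := unix.Cachestat(uint(f.Fd()), &crange, &cstat, 0); err != nil {
    		fmt.Println("cachestat:", err) // ENOSYS before Linux 6.5
    		return
    	}
    	fmt.Printf("cached pages: %d, dirty: %d\n", cstat.Cache, cstat.Dirty)
    }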
//go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af242..d1a3ad826 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a12299..f2f67423e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec1..3d0e98451 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d25..70963a95a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ffd..c218ebd28 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a3..e6c48500c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa2574..7286a9aa8 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee12..6f5a28894 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5d..66f31210d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae7..11d1f1698 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282f..7a5eb5743 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e9..62d8957ae 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f9..ce6a06885 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd81..d46d689d1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b5..b25343c71 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -171,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -326,4 +335,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdcb..9ddc89f4f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360e..70a3c96ee 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
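Besides the build-tag cleanup, the syscall_openbsd.go hunk above replaces the hand-rolled SYS_GETFSSTAT invocation with a generated getfsstat stub and exports FcntlInt/FcntlFlock on top of new fcntl wrappers, with raw pledge/unveil stubs declared at the bottom for the pledge and unveil files elsewhere in this patch. A sketch of the new FcntlFlock taking an advisory write lock, assuming GOOS=openbsd and a hypothetical lock file:

    package main

    import (
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.OpenFile("/tmp/demo.lock", os.O_CREATE|os.O_RDWR, 0o644)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	lk := unix.Flock_t{
    		Type:   unix.F_WRLCK, // exclusive write lock
    		Whence: 0,            // relative to start of file
    		Start:  0,
    		Len:    0, // zero length = lock through EOF
    	}
    	// F_SETLK is non-blocking; it fails if another process holds the lock.
    	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
    		log.Fatalf("lock not acquired: %v", err)
    	}
    	log.Println("lock acquired")
    }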
//go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d4..265caa87f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99d..ac4fda171 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f41..0a451e6dd 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139c..30a308cbb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7ff..ea954330f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa134..21974af06 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef81..e02d8ceae 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda2705..77081de8c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca58..05c95bccf 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707acf..23f39b7af 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041c..b473038c6 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix @@ -1105,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { diff --git a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437f..4fcd38de2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
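The one-line GetsockoptString change repeated above for the BSDs, Linux, Solaris, and z/OS swaps string(buf[:vallen-1]) for ByteSliceToString(buf[:vallen]): instead of assuming exactly one trailing NUL, the result is now truncated at the first NUL, which also copes with option values that carry padding or no terminator at all. A self-contained illustration of the difference:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Pretend getsockopt filled an 8-byte buffer and set vallen == 8.
    	buf := []byte("eth0\x00\x00\x00\x00")

    	oldStyle := string(buf[:len(buf)-1])    // keeps interior NULs
    	newStyle := unix.ByteSliceToString(buf) // cut at the first NUL

    	fmt.Printf("old: %q\n", oldStyle) // "eth0\x00\x00\x00"
    	fmt.Printf("new: %q\n", newStyle) // "eth0"
    }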
//go:build linux -// +build linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17b..79a84f18b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefdb..9eb0db664 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/timestruct.go b/event-exporter/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b284..7997b1902 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/timestruct.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/event-exporter/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae77..cb7e598ce 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. 
+func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/event-exporter/vendor/golang.org/x/sys/unix/xattr_bsd.go b/event-exporter/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f36..e16879396 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b79..2fb219d78 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26f..b0e6f5c85 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 143007627..e40fa8524 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a742..bb02aa6c0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e44..c0e0f8694 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
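With supportsUnveil completed above, both Unveil and Pledge (whose matching pledgeAvailable check opens this part of the patch) now fail fast with a descriptive error on OpenBSD releases before 6.4 instead of tripping over a missing syscall, and both route through libc stubs rather than raw syscall.Syscall. A sketch of the usual privilege-dropping sequence these wrappers are meant for, assuming GOOS=openbsd and hypothetical paths and promise strings:

    package main

    import (
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Restrict filesystem visibility, then freeze the unveil view.
    	if err := unix.Unveil("/var/db/app", "r"); err != nil {
    		log.Fatal(err) // e.g. "cannot call Unveil on OpenBSD 6.3"
    	}
    	if err := unix.UnveilBlock(); err != nil {
    		log.Fatal(err)
    	}
    	// Finally shrink the syscall surface; execpromises left empty.
    	if err := unix.Pledge("stdio rpath", ""); err != nil {
    		log.Fatal(err)
    	}
    }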
// cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c5138..6c6923906 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3be..dd9163f8e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69def..493a2a793 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2db..8b437b307 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d962..67c02dd57 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479b..36bf8399f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -481,10 +480,13 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +523,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +779,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1703,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1779,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -1795,6 +1803,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -2120,6 +2129,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2275,6 +2338,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -2403,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 
PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2607,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2851,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3013,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3461,6 +3557,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3570,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a5..42ff8c3c1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa5127..dca436004 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
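The zerrors_linux.go hunk above merges the full user-space seccomp API surface (SECCOMP_FILTER_FLAG_* flags, SECCOMP_RET_* action codes, and the GET_ACTION_AVAIL/GET_NOTIF_SIZES operations), while the per-arch files that follow add the endianness-dependent SECCOMP_IOCTL_NOTIF_* ioctl numbers. A sketch of one way these constants get used, probing whether the kernel supports a given filter action via the raw seccomp(2) syscall (assumes Linux; unix.SYS_SECCOMP is the existing generated syscall number):

    package main

    import (
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// SECCOMP_GET_ACTION_AVAIL takes a pointer to the u32 action code
    	// and returns 0 if the kernel knows how to handle that action.
    	action := uint32(unix.SECCOMP_RET_USER_NOTIF)
    	_, _, errno := unix.Syscall(unix.SYS_SECCOMP,
    		unix.SECCOMP_GET_ACTION_AVAIL, 0,
    		uintptr(unsafe.Pointer(&action)))
    	if errno == 0 {
    		fmt.Println("SECCOMP_RET_USER_NOTIF is available")
    	} else {
    		fmt.Println("not available:", errno)
    	}
    }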
//go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go @@ -283,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cdc..5cca668ac 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go @@ -289,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a3..d8cae6d15 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go @@ -279,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c9..28e39afdc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d34..cd66e92cb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0b..c1595eba7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f8..ee9456b0d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd96..8cfca81e1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f7..60b0deb3a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go @@ -337,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec1..f90aa7281 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go @@ -341,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd7..ba9e01503 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go @@ -341,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1c..07cdfd6e9 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 @@ -270,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f5..2f1dd214a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go @@ -345,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c6333..f40519d90 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go @@ -336,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d2..130085df4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c08..84769a1a3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749f..602ded003 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba192..efc0406ee 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474b..5a6500f83 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2b..a5aeeb979 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
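The SECCOMP_IOCTL_NOTIF_* values introduced in the zerrors hunks are the request numbers for the seccomp user-notification ioctls. They differ across ports (0x8008... on mips/ppc/sparc versus 0x4008... elsewhere) only because those architectures encode the _IOC direction bits differently; the underlying commands are the same. A minimal sketch of how a supervisor might use one, assuming notifyFd is a hypothetical listener descriptor obtained elsewhere via SECCOMP_SET_MODE_FILTER with SECCOMP_FILTER_FLAG_NEW_LISTENER:

    package supervisor

    import (
        "unsafe"

        "golang.org/x/sys/unix"
    )

    // notifIDValid reports whether a user-notification id is still live,
    // i.e. the target thread has not died or had its syscall interrupted
    // since the notification was read.
    func notifIDValid(notifyFd int, id uint64) bool {
        // SECCOMP_IOCTL_NOTIF_ID_VALID takes a pointer to the 64-bit id;
        // the kernel returns 0 if it is valid and an error otherwise.
        _, _, errno := unix.Syscall(unix.SYS_IOCTL,
            uintptr(notifyFd),
            uintptr(unix.SECCOMP_IOCTL_NOTIF_ID_VALID),
            uintptr(unsafe.Pointer(&id)))
        return errno == 0
    }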
//go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e4..0e9748a72 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe754..4f4449abc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe35..76a363f0f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1e..43ca0cdfd 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d403031..b1b8bb200 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a08..d2ddd3176 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506f..4dfd2e051 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca28..586317c78 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f79430..d7c881be7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e64..2d2de5d29 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. //go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e3..5adc79fb5 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d2331..6ea64a3c0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18adc..99ee4399a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae50..b68a78362 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b09..0a87450bf 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e9..ccb02f240 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0e..1b40b997b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64a..aad65fc79 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d145..c0096391a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508accac..7664df749 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead4..ae099182c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dcaa..11fd5d45b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 581849197..c3d2d6530 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd19..c698cbc01 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec74..1488d2712 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -2195,3 +2209,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc2..4def3e9fc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf83..fef2bc8ba 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c7..a9fd76a88 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe9..460065028 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e1..c8987d264 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
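Besides the per-arch constant churn, the mkmerge hunk above adds wrappers for two recent Linux syscalls: fchmodat2(2) (Linux 6.6), which honors the flags argument that plain fchmodat(2) lacks, and cachestat(2) (Linux 6.5), surfaced directly as unix.Cachestat with the signature shown. A small sketch of the latter; the range and stat type names follow the generated declaration, and older kernels report unix.ENOSYS:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        f, err := os.Open("/etc/hosts")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // A zero-length range asks the kernel to account the whole file,
        // per cachestat(2).
        var cr unix.CachestatRange
        var cs unix.Cachestat_t
        if err := unix.Cachestat(uint(f.Fd()), &cr, &cs, 0); err != nil {
            panic(err) // unix.ENOSYS before Linux 6.5
        }
        fmt.Printf("page cache: %d cached, %d dirty, %d in writeback\n",
            cs.Cache, cs.Dirty, cs.Writeback)
    }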
//go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb7..921f43061 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e9..44f067829 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718f..e7fa0abf0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bbd..8c5125675 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf98..7392fd45e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f40990264..41180434e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc2997..40c6ce7ae 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed7..2cfe34adb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde32237..61e6f0709 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x -// +build linux,s390x package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65bf..834b84204 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5bac..e91ebc14a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556bab..be28babbc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917a..fb587e826 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2dc..d576438bb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b6456..9dc42410b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && 386 -// +build openbsd,386 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 
3dcacd30d..41b561731 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc112..0d3a0751c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
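The OpenBSD hunks here, and their per-arch twins below, route pledge(2) and unveil(2) through libc trampolines (the go:cgo_import_dynamic plus JMP pattern above) rather than raw syscalls, in line with OpenBSD's rule that system calls must enter the kernel via libc. Callers are unaffected and keep using the exported wrappers; a minimal sketch of a process confining itself to read-only access under /etc:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Make /etc the only visible filesystem subtree, read-only, then
        // forbid any further unveil calls.
        if err := unix.Unveil("/etc", "r"); err != nil {
            panic(err)
        }
        if err := unix.UnveilBlock(); err != nil {
            panic(err)
        }
        // Keep only the stdio and read-only-path promise sets; the
        // PledgePromises form leaves execpromises NULL (unrestricted).
        if err := unix.PledgePromises("stdio rpath"); err != nil {
            panic(err)
        }

        b, err := os.ReadFile("/etc/hosts")
        if err != nil {
            panic(err)
        }
        fmt.Printf("read %d bytes under pledge+unveil\n", len(b))
    }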
//go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b0..4019a656f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, 
$8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb328..c39f7776d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && arm -// +build openbsd,arm package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, 
times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c92231404..ac4af24f9 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fda..57571d072 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
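libc_getfsstat gets the same trampoline treatment in each OpenBSD port. The exported unix.Getfsstat keeps getfsstat(2)'s two-call convention: a nil buffer yields the number of mounted filesystems, and a sized buffer is filled with Statfs_t records. A sketch, with field names as I understand the OpenBSD ztypes:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // First call: nil buffer, just count the mounts.
        n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
        if err != nil {
            panic(err)
        }
        fss := make([]unix.Statfs_t, n)
        // Second call: fill the buffer (n may shrink if mounts went away).
        n, err = unix.Getfsstat(fss, unix.MNT_NOWAIT)
        if err != nil {
            panic(err)
        }
        for _, fs := range fss[:n] {
            fmt.Printf("%s on %s\n",
                unix.ByteSliceToString(fs.F_mntfromname[:]),
                unix.ByteSliceToString(fs.F_mntonname[:]))
        }
    }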
//go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c92..f77d53212 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, 
$8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de509..e62963e67 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, 
path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bceab..fae140b62 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99ae..00831354c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
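The new fcntl wrapper is not meant to be called directly by user code; it backs the portable helpers, of which unix.FcntlInt covers the integer-argument commands. For example, reading a descriptor's file status flags:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        flags, err := unix.FcntlInt(os.Stdin.Fd(), unix.F_GETFL, 0)
        if err != nil {
            panic(err)
        }
        fmt.Printf("stdin status flags: %#x (O_NONBLOCK set: %v)\n",
            flags, flags&unix.O_NONBLOCK != 0)
    }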
//go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f76600..9d1e0ff06 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -801,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360f..79029ed58 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path 
string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b2..da115f9a4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b40189464..829b87feb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
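For context on the pledge/unveil additions above: these generated functions back the exported unix.Pledge and unix.Unveil helpers. A minimal OpenBSD-only sketch of typical use, assuming those exported wrappers; the path and promise strings are illustrative, not taken from the patch:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Expose only /var/log, read-only, then forbid further unveil calls.
	// Paths and promises here are illustrative.
	if err := unix.Unveil("/var/log", "r"); err != nil {
		log.Fatal(err)
	}
	if err := unix.UnveilBlock(); err != nil {
		log.Fatal(err)
	}
	// Drop to stdio plus read-only filesystem access; no exec promises.
	if err := unix.Pledge("stdio rpath", ""); err != nil {
		log.Fatal(err)
	}
}
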
//go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4b..94f011238 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e048471..3a58ae819 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf83..dcb7a0eb7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd8..db5a7bf13 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4a..7be575a77 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a8..d6e3174c6 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e44054470..ee97157d0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
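The .s hunks above show the calling convention these bindings rely on: each Go wrapper dispatches through a libc_*_trampoline_addr pointer, the address of an assembly stub that JMPs (or CALLs) to the libc symbol pulled in by the //go:cgo_import_dynamic directive, so OpenBSD syscalls route through libc rather than raw trap instructions. At the user level, the newly bound getfsstat(2) is normally reached through an exported wrapper; a sketch, assuming unix.Getfsstat is exported for OpenBSD in this version as it is for Darwin:

package fsinfo

import "golang.org/x/sys/unix"

// Mounts returns per-filesystem statistics using the usual two-call
// pattern: a nil buffer first to learn the mount count, then a sized one.
func Mounts() ([]unix.Statfs_t, error) {
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		return nil, err
	}
	buf := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(buf, unix.MNT_NOWAIT); err != nil {
		return nil, err
	}
	return buf, nil
}
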
//go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fce..35c3b91d0 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9b..5edda7687 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bbf..0dc9e8b4d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0c..308ddf3a1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d96107..418664e3d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b838..34d0b86d7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc106..b71cf45e2 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
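Most hunks in this stretch are mechanical: golang.org/x/sys now requires a Go 1.17+ toolchain, so the generated files keep only the //go:build constraint (the boolean syntax introduced in Go 1.17) and drop the legacy // +build line that older toolchains needed. The pairing being collapsed looks like this in any constrained file; only the first line survives the cleanup, and the two spellings are equivalent:

//go:build ppc64 && openbsd
// +build ppc64,openbsd

package unix
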
//go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc4274..e32df1c1e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0c..15ad6111f 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d3..0cc3ce496 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4e..856d92d69 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37ee..8d467094c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff81..edc173244 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f710..445eba206 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3d..adba01bca 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,9 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a0..014c4e9c7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,9 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f4987..ccc97d74d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,9 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcadc..ec2b64a95 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,9 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b2555..21a839e33 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d0..c11121ec3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e3..909b631fc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d64..e49bed16e 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. 
DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc8..66017d2d3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761c..47bab18dc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,9 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eba..b2aa8cd49 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f1..524a1b1c9 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6db..d59b943ac 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952efa..31e771d53 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
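The syscall-number additions repeated across the architecture files above (452 through 456 on most ports; the MIPS ports offset them into their 4000/5000 ranges) track syscalls introduced around Linux 6.6. The patch only adds the numbers, so until a high-level wrapper exists they are invoked raw; a hedged sketch for fchmodat2(2), assuming a 6.6+ kernel, with dirfd, path, and mode all illustrative:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// fchmodat2 behaves like fchmodat(2) but honors flags such as
// AT_SYMLINK_NOFOLLOW. Raw invocation via the new SYS_FCHMODAT2 number;
// requires Linux 6.6+.
func fchmodat2(dirfd int, path string, mode uint32, flags int) error {
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return err
	}
	_, _, errno := unix.Syscall6(unix.SYS_FCHMODAT2,
		uintptr(dirfd), uintptr(unsafe.Pointer(p)),
		uintptr(mode), uintptr(flags), 0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// Illustrative call: chmod a path without following a trailing symlink.
	fmt.Println(fchmodat2(unix.AT_FDCWD, "/tmp/example", 0o600, unix.AT_SYMLINK_NOFOLLOW))
}
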
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 597733813..9fd77c6cb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af29189..af10af28c 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a97..cc2028af4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef5910..c06dd4415 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01f..9ddbf3e08 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa24..19a6ee413 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0ec..05192a782 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad43..b2e308581 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1d..3e6d57cae 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c5..3a219bdce 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3d..091d107f3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10ea..28ff4ef74 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b8..30e405bb4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc48337..6cbd094a3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b2890..7c03b6ee7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb15..422107ee8 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a88..505a12acf 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975e..cc986c790 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b42..dc0c955ee 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -175,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -456,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -552,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2672,6 +2675,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2716,6 +2720,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2726,6 +2735,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + 
BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2743,6 +2764,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2763,6 +2786,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2779,6 +2803,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2867,6 +2893,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2902,6 +2930,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2980,6 +3009,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 @@ -3368,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4152,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5103,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5516,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 @@ -5883,3 +5919,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc5..438a30aff 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
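Worth calling out in the ztypes_linux.go hunk above: TCPInfo gains the kernel's new RTO accounting fields (Total_rto, Total_rto_recoveries, Total_rto_time), which is why SizeofTCPInfo moves from 0xf0 to 0xf8. Reading the struct is unchanged; a sketch using the existing GetsockoptTCPInfo helper, where fd is assumed to be a connected TCP socket:

package netstats

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// PrintTCPInfo reports round-trip time and the new RTO counters for fd,
// a connected TCP socket descriptor (field selection is illustrative).
func PrintTCPInfo(fd int) error {
	ti, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO)
	if err != nil {
		return err
	}
	fmt.Printf("rtt=%dus total_rto=%d rto_recoveries=%d\n",
		ti.Rtt, ti.Total_rto, ti.Total_rto_recoveries)
	return nil
}
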
//go:build 386 && linux -// +build 386,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c688..adceca355 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c2..eeaa00a37 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc4216..6739aa91d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc6..9920ef631 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7ccd..2923b799a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd332..ce2750ee4 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77a..3038811d7 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
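Also from the ztypes_linux.go hunk: the new Cachestat_t and CachestatRange types pair with SYS_CACHESTAT (451, already present before this bump; Linux 6.5+). Whether this x/sys version exports a Cachestat wrapper is not visible in the patch, so the sketch below calls the number raw; the file and the zero range (meaning "whole file") are illustrative:

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

// pageCacheStats asks Linux 6.5+ how much of fd's file is resident in the
// page cache. A zero CachestatRange covers the entire file.
func pageCacheStats(fd uintptr) (unix.Cachestat_t, error) {
	var (
		cr unix.CachestatRange
		cs unix.Cachestat_t
	)
	_, _, errno := unix.Syscall6(unix.SYS_CACHESTAT, fd,
		uintptr(unsafe.Pointer(&cr)), uintptr(unsafe.Pointer(&cs)),
		0, 0, 0) // flags must currently be zero
	if errno != 0 {
		return cs, errno
	}
	return cs, nil
}

func main() {
	f, err := os.Open("/etc/hostname") // illustrative file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	cs, err := pageCacheStats(f.Fd())
	fmt.Println(cs.Cache, cs.Dirty, err)
}
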
//go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b8757..efc6fed18 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840db..9a654b75a 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 034874395..40d358e33 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad067047..148c6ceb8 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c32..72ba81543 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d025..71e765508 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6c..4abbdb9de 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9d..f22e7947d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655d..066a7d83d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a19..439548ec9 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151c..16085d3bb 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b7..afd13a3af 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a5479886..5d97f1f9b 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1f..34871cdc1 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266c..5911bceb3 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb12..e4f24f3bc 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c010..ca50a7930 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a43..d7d7f7902 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c6..14160576d 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb3..54f31be63 100644 --- a/event-exporter/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/event-exporter/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/event-exporter/vendor/golang.org/x/sys/windows/aliases.go b/event-exporter/vendor/golang.org/x/sys/windows/aliases.go index a20ebea63..ce2d713d6 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/aliases.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/empty.s b/event-exporter/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd31..ba64caca5 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/empty.s +++ b/event-exporter/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). diff --git a/event-exporter/vendor/golang.org/x/sys/windows/env_windows.go b/event-exporter/vendor/golang.org/x/sys/windows/env_windows.go index b8ad19250..d4577a423 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/env_windows.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/event-exporter/vendor/golang.org/x/sys/windows/eventlog.go b/event-exporter/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645e..6c366955d 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/eventlog.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/mksyscall.go b/event-exporter/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c5..dbcdb090c 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/race.go b/event-exporter/vendor/golang.org/x/sys/windows/race.go index 9196b089c..0f1bdc386 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/race.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/race0.go b/event-exporter/vendor/golang.org/x/sys/windows/race0.go index 7bae4817a..0c78da78b 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/race0.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
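The env_windows.go rewrite above is the one behavioral fix in this stretch: the old loop advanced by 2*(len(entry)+1) bytes, where len(entry) is the UTF-8 byte length of the decoded string, which overshoots whenever an entry contains non-ASCII characters. The new loop measures in UTF-16 code units, scanning for each entry's NUL terminator and decoding the slice directly, stopping at the empty entry that terminates the block. The parsing strategy in isolation, as a self-contained sketch (the sample block is hypothetical):

package main

import (
	"fmt"
	"unicode/utf16"
)

// parseEnvBlock walks a Windows-style environment block: UTF-16,
// NUL-terminated entries, with an empty entry marking the end.
// Assumes a well-formed, double-NUL-terminated block.
func parseEnvBlock(block []uint16) []string {
	var env []string
	for i := 0; i < len(block) && block[i] != 0; {
		j := i
		for block[j] != 0 { // find this entry's NUL terminator
			j++
		}
		env = append(env, string(utf16.Decode(block[i:j])))
		i = j + 1
	}
	return env
}

func main() {
	block := utf16.Encode([]rune("A=1\x00B=2\x00\x00"))
	fmt.Println(parseEnvBlock(block)) // [A=1 B=2]
}
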
//go:build windows && !race -// +build windows,!race package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/service.go b/event-exporter/vendor/golang.org/x/sys/windows/service.go index c44a1b963..a9dc6308d 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/service.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/str.go b/event-exporter/vendor/golang.org/x/sys/windows/str.go index 4fc01434e..6a4f9ce6a 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/str.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/event-exporter/vendor/golang.org/x/sys/windows/syscall.go b/event-exporter/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb95..e85ed6b9c 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/syscall.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/event-exporter/vendor/golang.org/x/sys/windows/syscall_windows.go b/event-exporter/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57ca..6395a031d 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } @@ -155,6 +154,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -192,6 +193,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -233,6 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token 
Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +972,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/event-exporter/vendor/golang.org/x/sys/windows/types_windows.go b/event-exporter/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c85..359780f6a 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/types_windows.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/event-exporter/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/event-exporter/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de4..e8791c82c 100644 --- a/event-exporter/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/event-exporter/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,6 +184,7 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") @@ -253,6 +254,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -329,6 +331,7 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = 
modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") @@ -339,6 +342,7 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -1604,6 +1608,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -2185,6 +2198,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2870,6 +2891,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2960,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { diff --git a/event-exporter/vendor/golang.org/x/term/term_unix.go b/event-exporter/vendor/golang.org/x/term/term_unix.go index 62c2b3f41..1ad0ddfe3 100644 --- a/event-exporter/vendor/golang.org/x/term/term_unix.go +++ b/event-exporter/vendor/golang.org/x/term/term_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/event-exporter/vendor/golang.org/x/term/term_unix_bsd.go b/event-exporter/vendor/golang.org/x/term/term_unix_bsd.go index 853b3d698..9dbf54629 100644 --- a/event-exporter/vendor/golang.org/x/term/term_unix_bsd.go +++ b/event-exporter/vendor/golang.org/x/term/term_unix_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package term diff --git a/event-exporter/vendor/golang.org/x/term/term_unix_other.go b/event-exporter/vendor/golang.org/x/term/term_unix_other.go index 1e8955c93..1b36de799 100644 --- a/event-exporter/vendor/golang.org/x/term/term_unix_other.go +++ b/event-exporter/vendor/golang.org/x/term/term_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || linux || solaris || zos -// +build aix linux solaris zos package term diff --git a/event-exporter/vendor/golang.org/x/term/term_unsupported.go b/event-exporter/vendor/golang.org/x/term/term_unsupported.go index f1df85065..3c409e588 100644 --- a/event-exporter/vendor/golang.org/x/term/term_unsupported.go +++ b/event-exporter/vendor/golang.org/x/term/term_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 package term diff --git a/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4a..784bb8808 100644 --- a/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 package bidirule diff --git a/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a92001..8e1e94395 100644 --- a/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72c..d2bd71181 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea2..f76bdca27 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b4..3aa2c3bdf 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7beb..a71375790 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce5802..f15746f7d 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb20..c164d3791 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package bidi diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a078827..1af161c75 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c43..eb73ecc37 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b273300..276cb8d8c 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8a..0cceffd73 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b879..b0819e42d 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae50..bf65457d9 100644 --- a/event-exporter/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/event-exporter/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/event-exporter/vendor/golang.org/x/time/AUTHORS b/event-exporter/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/event-exporter/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/event-exporter/vendor/golang.org/x/time/CONTRIBUTORS b/event-exporter/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/event-exporter/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/event-exporter/vendor/golang.org/x/time/rate/rate.go b/event-exporter/vendor/golang.org/x/time/rate/rate.go index b0b982e9c..8f6c7f493 100644 --- a/event-exporter/vendor/golang.org/x/time/rate/rate.go +++ b/event-exporter/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. 
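To make the token-bucket behavior documented above concrete, here is a minimal usage sketch (editorial, not part of the vendored file); rate.NewLimiter, Allow, and WaitN are the public API touched by this hunk:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Refill at 10 tokens per second, allow bursts of up to 5 events; the
	// Limiter may be shared by many goroutines, per the new doc comment.
	lim := rate.NewLimiter(rate.Limit(10), 5)

	if lim.Allow() { // consumes one token if available; never blocks
		fmt.Println("event permitted")
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.WaitN(ctx, 2); err != nil { // blocks for 2 tokens or fails at the deadline
		fmt.Println("rate limited:", err)
	}
}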
type Limiter struct { mu sync.Mutex limit Limit @@ -80,6 +82,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +104,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +140,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +165,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +173,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +185,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +211,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. 
// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +238,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +265,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +302,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +319,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. 
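The newTimer parameter threaded through wait above exists so tests can inject a deterministic clock; production WaitN wraps time.NewTimer. A sketch of a fake generator (hypothetical test helper, shown only to illustrate the seam):

fakeTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) {
	ch := make(chan time.Time, 1)
	advance := func() { ch <- time.Now().Add(d) } // "elapses" d without sleeping
	stop := func() bool { return true }
	return ch, stop, advance
}
// A test inside the package could pass fakeTimer to lim.wait and observe the
// select fire immediately once advance() has run.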
-func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +333,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +342,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +354,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -351,16 +380,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -369,20 +394,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/event-exporter/vendor/golang.org/x/time/rate/sometimes.go b/event-exporter/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/event-exporter/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. 
+// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. +func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/event-exporter/vendor/google.golang.org/api/googleapi/googleapi.go b/event-exporter/vendor/google.golang.org/api/googleapi/googleapi.go index 2d3e00edc..b5e38c662 100644 --- a/event-exporter/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/event-exporter/vendor/google.golang.org/api/googleapi/googleapi.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -79,6 +78,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +124,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. 
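With Wrap and Unwrap (defined just below) in place, callers can reach the structured error through the standard errors helpers. A consumer-side sketch, assuming err came from a generated API call and that errors, fmt, and github.com/googleapis/gax-go/v2/apierror are imported:

var gerr *googleapi.Error
if errors.As(err, &gerr) {
	fmt.Println("HTTP status:", gerr.Code)
	var aerr *apierror.APIError
	if errors.As(gerr, &aerr) { // walks gerr.Unwrap()
		fmt.Println("reason:", aerr.Reason())
	}
}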
+func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -132,7 +143,7 @@ func CheckResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) @@ -172,10 +183,11 @@ func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } @@ -382,11 +394,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ diff --git a/event-exporter/vendor/google.golang.org/api/googleapi/transport/apikey.go b/event-exporter/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2e..f5d826c2a 100644 --- a/event-exporter/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/event-exporter/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED. Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/event-exporter/vendor/google.golang.org/api/internal/cba.go b/event-exporter/vendor/google.golang.org/api/internal/cba.go new file mode 100644 index 000000000..fbf4ef1c6 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/api/internal/cba.go @@ -0,0 +1,318 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cba.go (certificate-based access) contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials +// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. 
If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail fast, but let the backend return an error when connecting. +// +// If running within Google's cloud environment, and client certificate is not specified +// and not available through DCA, we will try mTLS with credentials held by +// the Secure Session Agent, which is part of Google's cloud infrastructure. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS URL, since the URL pattern may change at any time. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. + +// Package internal supports the options and transport packages. +package internal + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/url" + "os" + "strings" + + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/api/internal/cert" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +) + +// getClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" { + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + // if settings.DefaultEndpointTemplate == "" { + // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com") + // } + endpoint = resolvedDefaultEndpoint(settings) + } + return clientCertSource, endpoint, nil +} + +type transportConfig struct { + clientCertSource cert.Source // The client certificate source. + endpoint string // The corresponding endpoint to use based on client certificate source. + s2aAddress string // The S2A address if it can be used, otherwise an empty string. + s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. 
+} + +func getTransportConfig(settings *DialSettings) (*transportConfig, error) { + clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: "", + s2aMTLSEndpoint: "", + } + + if !shouldUseS2A(clientCertSource, settings) { + return &defaultTransportConfig, nil + } + if !settings.IsUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: settings.DefaultMTLSEndpoint, + }, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as a client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificates to be used (including user-provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8000/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + if isMTLS(clientCertSource) { + if !settings.IsUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } + return settings.DefaultMTLSEndpoint, nil + } + return resolvedDefaultEndpoint(settings), nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if resolvedDefaultEndpoint(settings) == "" { + // If DefaultEndpoint is not configured, use the user-provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. 
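A worked example of that merge rule, as an editorial sketch mirroring the mergeEndpoints helper defined below:

u, _ := url.Parse("https://foo.com/bar/baz") // u.Host == "foo.com"
merged := strings.Replace("https://foo.com/bar/baz", u.Host, "myhost:8000", 1)
// merged == "https://myhost:8000/bar/baz": only the host is swapped; scheme and path survive.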
+ return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint) +} + +func isMTLS(clientCertSource cert.Source) bool { + mtlsMode := getMTLSMode() + return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) +} + +// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the +// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the +// deprecated DefaultEndpoint value. +func resolvedDefaultEndpoint(settings *DialSettings) string { + if settings.DefaultEndpointTemplate == "" { + return settings.DefaultEndpoint + } + return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the +// corresponding endpoint to use for GRPC client. +func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, +// and the endpoint to use for HTTP client. +func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, nil, "", err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
+ if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil +} + +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return !settings.EnableDirectPath && !settings.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/cert/default_cert.go b/event-exporter/vendor/google.golang.org/api/internal/cert/default_cert.go new file mode 100644 index 000000000..21d025153 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/api/internal/cert/default_cert.go @@ -0,0 +1,58 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. +type defaultCertData struct { + once sync.Once + source Source + err error +} + +var ( + defaultCert defaultCertData +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil error are +// returned to indicate that a default certificate source is unavailable. 
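Downstream transports wire the returned Source straight into a TLS config, since its signature matches the crypto/tls callback. An illustrative sketch (newTLSConfig is a hypothetical caller):

func newTLSConfig() (*tls.Config, error) {
	src, err := cert.DefaultSource()
	if err != nil {
		return nil, err // e.g. corrupt Secure Connect metadata
	}
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
	if src != nil { // nil Source with nil error: no default source available
		cfg.GetClientCertificate = src
	}
	return cfg, nil
}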
+func DefaultSource() (Source, error) { + defaultCert.once.Do(func() { + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } + }) + return defaultCert.source, defaultCert.err +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/cert/enterprise_cert.go b/event-exporter/vendor/google.golang.org/api/internal/cert/enterprise_cert.go new file mode 100644 index 000000000..1061b5f05 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -0,0 +1,54 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certificate-related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on macOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/event-exporter/vendor/google.golang.org/api/transport/cert/default_cert.go b/event-exporter/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go similarity index 57% rename from event-exporter/vendor/google.golang.org/api/transport/cert/default_cert.go rename to event-exporter/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go index 04aefec0a..afd79ffe2 100644 --- a/event-exporter/vendor/google.golang.org/api/transport/cert/default_cert.go +++ b/event-exporter/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC. +// Copyright 2022 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -18,7 +18,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -32,65 +31,48 @@ const ( metadataFile = "context_aware_metadata.json" ) -// defaultCertData holds all the variables pertaining to -// the default certficate source created by DefaultSource. -type defaultCertData struct { - once sync.Once - source Source - err error - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate -} - -var ( - defaultCert defaultCertData -) - -// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. -type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json -// -// If that file does not exist, a nil source is returned. -func DefaultSource() (Source, error) { - defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = newSecureConnectSource() - }) - return defaultCert.source, defaultCert.err -} - type secureConnectSource struct { metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate } type secureConnectMetadata struct { Cmd []string `json:"cert_provider_command"` } -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) } + + file, err := os.ReadFile(configFilePath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } return nil, err } var metadata secureConnectMetadata if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) } if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) } return (&secureConnectSource{ metadata: metadata, @@ -105,10 +87,10 @@ func validateMetadata(metadata secureConnectMetadata) error { } func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - defaultCert.cachedCertMutex.Lock() - defer defaultCert.cachedCertMutex.Unlock() - if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { - return defaultCert.cachedCert, nil + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil } // Expand OS environment variables in the cert provider command such as "$HOME". for i := 0; i < len(s.metadata.Cmd); i++ { @@ -117,14 +99,13 @@ func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestI command := s.metadata.Cmd data, err := exec.Command(command[0], command[1:]...).Output() if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. return nil, err } cert, err := tls.X509KeyPair(data, data) if err != nil { return nil, err } - defaultCert.cachedCert = &cert + s.cachedCert = &cert return &cert, nil } diff --git a/event-exporter/vendor/google.golang.org/api/internal/creds.go b/event-exporter/vendor/google.golang.org/api/internal/creds.go index b067a179b..b64893098 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/creds.go +++ b/event-exporter/vendor/google.golang.org/api/internal/creds.go @@ -6,17 +6,24 @@ package internal import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" - "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" + "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. 
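An illustrative call (this internal package is not importable by end users, who reach it through google.golang.org/api/option; "key.json" is a hypothetical path):

ctx := context.Background()
// Per baseCreds below: CredentialsJSON is consulted before CredentialsFile,
// and Application Default Credentials are the final fallback.
creds, err := Creds(ctx, &DialSettings{CredentialsFile: "key.json"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(creds.ProjectID)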
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -41,7 +48,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) + data, err := os.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } @@ -70,17 +77,34 @@ const ( // // - A self-signed JWT flow will be executed if the following conditions are // met: -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersontation +// +// (1) At least one of the following is true: +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users +// (2) No service account impersonation // // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, err := getClientCertificateSource(ds) + if err != nil { + return nil, err + } + params.TokenURL = oAuth2Endpoint(clientCertSource) + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + + // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) if err != nil { return nil, err } @@ -101,22 +125,37 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g return cred, err } +func oAuth2Endpoint(clientCertSource cert.Source) string { + if isMTLS(clientCertSource) { + return google.MTLSTokenURL + } + return google.Endpoint.TokenURL +} + func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { - if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && - ds.ImpersonationConfig == nil { - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. - } - if err := json.Unmarshal(data, &f); err != nil { - return false, err - } - return f.Type == serviceAccountKey, nil + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs with scopes. + if !ds.IsUniverseDomainGDU() { + return typeServiceAccount(data) + } + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil { + return typeServiceAccount(data) + } + return false, nil } +// typeServiceAccount checks if JSON data is for a service account. +func typeServiceAccount(data []byte) (bool, error) { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. 
+ } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil +} + func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { // Scopes are preferred in self-signed JWT unless the scope is not available @@ -130,14 +169,22 @@ func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource } } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. +func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject @@ -156,3 +203,37 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * ProjectID: creds.ProjectID, }, nil } + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} + +// ErrUniverseNotMatch composes an error string from the provided universe +// domain sources (DialSettings and Credentials, respectively). +func ErrUniverseNotMatch(settingsUD, credsUD string) error { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "WithUniverseDomain explicitly, \"googleapis.com\" is the default", + settingsUD, + credsUD) +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/error.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 000000000..886c6532b --- /dev/null +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. 
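A usage sketch for the wrapping described above: after WrapError, both the HTTP-level googleapi.Error and the structured apierror.APIError remain reachable with errors.As. The inspected err is assumed to come from a generated API call:

import (
	"errors"
	"fmt"

	"github.com/googleapis/gax-go/v2/apierror"
	"google.golang.org/api/googleapi"
)

func describe(err error) string {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		// HTTP status and message from the service response.
		msg := fmt.Sprintf("HTTP %d: %s", gerr.Code, gerr.Message)
		var aerr *apierror.APIError
		if errors.As(err, &aerr) {
			// Structured detail attached by WrapError, when present.
			msg += ", reason=" + aerr.Reason()
		}
		return msg
	}
	return err.Error()
}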
+func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/json.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189..eab49a11e 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. // A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { @@ -85,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -119,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback. +func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/media.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/media.go index d14a22470..c048a5708 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/media.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -17,92 +16,10 @@ import ( "sync" "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. 
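To make the field-selection rules of MarshalJSON above concrete, here is a hypothetical generated schema and the output it would produce; the type and field names are illustrative only, not part of this patch:

// A zero-valued field is normally dropped; listing it in ForceSendFields
// keeps it, and listing a field in NullFields emits an explicit null.
type LogBucketPatch struct {
	RetentionDays int64  `json:"retentionDays,omitempty"`
	Description   string `json:"description,omitempty"`

	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// b := LogBucketPatch{ForceSendFields: []string{"RetentionDays"}, NullFields: []string{"Description"}}
// MarshalJSON(b, b.ForceSendFields, b.NullFields) produces (field order may vary):
//
//	{"retentionDays":0,"description":null}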
-type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -234,7 +151,10 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) @@ -245,7 +165,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. 
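The sniffer removed above is replaced by the gax-go helper, as the NewInfoFromMedia hunk below shows. A runnable sketch of the replacement call, with a made-up payload:

import (
	"bytes"
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func sniff(data []byte) {
	// DetermineContentType may consume bytes while sniffing, so keep
	// reading from the returned reader, not the original one.
	r, ctype := gax.DetermineContentType(bytes.NewReader(data))
	fmt.Println(ctype) // "application/pdf" for data beginning with "%PDF-"
	_ = r
}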
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -289,17 +213,16 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) - rm := ioutil.NopCloser(fm()) + rb := io.NopCloser(fb()) + rm := io.NopCloser(fm()) var mimeBoundary string if _, params, err := mime.ParseMediaType(ctype); err == nil { mimeBoundary = params["boundary"] @@ -309,18 +232,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := io.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/params.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/params.go index 6703721ff..1a30d2ca2 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. func (u URLParams) Encode() string { return url.Values(u).Encode() } diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/resumable.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0eae147fa..08e7aacef 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,8 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" + + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -38,6 +42,11 @@ type ResumableUpload struct { // ChunkRetryDeadline configures the per-chunk deadline after which no further // retries should happen. 
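For reference, the URLParams.Encode documented in params.go above is a thin wrapper over url.Values; a standalone sketch reproducing the map type:

package main

import (
	"fmt"
	"net/url"
)

// URLParams mirrors the gensupport type: a map[string][]string.
type URLParams map[string][]string

func (u URLParams) Encode() string { return url.Values(u).Encode() }

func main() {
	u := URLParams{"foo": {"quux"}, "bar": {"baz"}}
	fmt.Println(u.Encode()) // "bar=baz&foo=quux" (keys are sorted)
}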
ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metrics + // and idempotency headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -72,6 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). + baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + + // Set idempotency token header which is used by GCS uploads. + req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -178,22 +196,29 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err for { var pause time.Duration - // Each chunk gets its own initialized-at-zero backoff. + // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() - quitAfter := time.After(retryDeadline) + quitAfterTimer := time.NewTimer(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { + pauseTimer := time.NewTimer(pause) select { case <-ctx.Done(): + quitAfterTimer.Stop() + pauseTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-time.After(pause): - case <-quitAfter: + case <-pauseTimer.C: + case <-quitAfterTimer.C: + pauseTimer.Stop() return prepareReturn(resp, err) } + pauseTimer.Stop() // Check for context cancellation or timeout once more. If more than one // case in the select statement above was satisfied at the same time, Go @@ -202,11 +227,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // canceled before or the timeout was reached. select { case <-ctx.Done(): + quitAfterTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-quitAfter: + case <-quitAfterTimer.C: return prepareReturn(resp, err) default: } @@ -220,9 +246,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // Check if we should retry the request. 
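The move from time.After to explicit timers in the Upload loop above prevents a leaked timer per loop iteration; the pattern in isolation, as a sketch:

import (
	"context"
	"time"
)

func waitOrDone(ctx context.Context, pause time.Duration) error {
	t := time.NewTimer(pause)
	select {
	case <-ctx.Done():
		// Stop releases the timer right away; a bare time.After(pause)
		// would keep its timer live until it fired, even after cancellation.
		t.Stop()
		return ctx.Err()
	case <-t.C:
		return nil
	}
}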
if !errorFunc(status, err) { + quitAfterTimer.Stop() break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/retry.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/retry.go index 873dab363..20b57d925 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -5,7 +5,10 @@ package gensupport import ( + "errors" "io" + "net" + "strings" "time" "github.com/googleapis/gax-go/v2" @@ -65,6 +68,14 @@ func shouldRetry(status int, err error) bool { return true } } + var opErr *net.OpError + if errors.As(err, &opErr) { + if strings.Contains(opErr.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped // errors. if err, ok := err.(interface{ Unwrap() error }); ok { diff --git a/event-exporter/vendor/google.golang.org/api/internal/gensupport/send.go b/event-exporter/vendor/google.golang.org/api/internal/gensupport/send.go index dd50cc20a..f39dd00d9 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/event-exporter/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,17 +8,52 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" "time" + "github.com/google/uuid" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) +} + // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -53,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. 
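The wrappedCallErr introduced in send.go above lets callers match either layer of a failed retried call; a self-contained sketch of its errors.Is and Unwrap behavior:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// Reproduces wrappedCallErr so the sketch compiles on its own.
type wrappedCallErr struct {
	ctxErr     error
	wrappedErr error
}

func (e wrappedCallErr) Error() string {
	return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
}
func (e wrappedCallErr) Unwrap() error { return e.wrappedErr }
func (e wrappedCallErr) Is(target error) bool {
	return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target)
}

func main() {
	err := wrappedCallErr{context.DeadlineExceeded, io.ErrUnexpectedEOF}
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: Is matches the context error
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF))      // true: Is reaches the wrapped service error
}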
+ if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -71,6 +116,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var resp *http.Response var err error + attempts := 1 + invocationID := uuid.New().String() + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") // Loop to retry the request, up to the context deadline. var pause time.Duration @@ -88,15 +136,17 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var errorFunc = retry.errorFunc() for { + t := time.NewTimer(pause) select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + t.Stop() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err - case <-time.After(pause): + return resp, ctx.Err() + case <-t.C: } if ctx.Err() != nil { @@ -104,12 +154,20 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r // select is satisfied at the same time, Go will choose one arbitrarily. // That can cause an operation to go through even if the context was // canceled before. - if err == nil { - err = ctx.Err() + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() } + // Set retry metrics and idempotency headers for GCS. + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). 
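Both send paths above copy context-carried headers onto the outgoing request. The public gax-go callctx helpers are the producer side of that contract, sketched here with a made-up header value:

import (
	"context"

	"github.com/googleapis/gax-go/v2/callctx"
)

func withRequestParams(ctx context.Context) context.Context {
	// Every key/value pair set here lands in req.Header before the call.
	return callctx.SetHeaders(ctx, "x-goog-request-params", "bucket=my-bucket")
}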
+ invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) + req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) + resp, err = client.Do(req.WithContext(ctx)) var status int @@ -123,6 +181,7 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r if req.GetBody == nil || !errorFunc(status, err) { break } + attempts++ var errBody error req.Body, errBody = req.GetBody() if errBody != nil { diff --git a/event-exporter/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/event-exporter/vendor/google.golang.org/api/internal/impersonate/impersonate.go index b465bbcd1..4b2c775f2 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/impersonate/impersonate.go +++ b/event-exporter/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -105,7 +104,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/event-exporter/vendor/google.golang.org/api/internal/s2a.go b/event-exporter/vendor/google.golang.org/api/internal/s2a.go new file mode 100644 index 000000000..c70f2419b --- /dev/null +++ b/event-exporter/vendor/google.golang.org/api/internal/s2a.go @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "log" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" + +// The period an MTLS config can be reused before needing refresh. +var configExpiry = time.Hour + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + once sync.Once +) + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + once.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
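GetS2AAddress above deliberately collapses every failure (MDS unreachable, bad JSON, expired config) into an empty string, so call sites reduce to a simple fallback; a sketch, where defaultAddr is a hypothetical parameter:

func s2aOrDefault(defaultAddr string) string {
	// An expired or invalid MDS config yields "", meaning: skip S2A
	// and use the normal endpoint selection.
	if addr := GetS2AAddress(); addr != "" {
		return addr // plaintext connection to the local S2A agent
	}
	return defaultAddr
}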
+type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource. +// It has the logic to query MDS and return an mtlsConfig. +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/settings.go b/event-exporter/vendor/google.golang.org/api/internal/settings.go index 76efdb227..e17141a6f 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/settings.go +++ b/event-exporter/vendor/google.golang.org/api/internal/settings.go @@ -9,6 +9,9 @@ import ( "crypto/tls" "errors" "net/http" + "os" + "strconv" + "time" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -16,11 +19,17 @@ import ( "google.golang.org/grpc" ) +const ( + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + universeDomainDefault = "googleapis.com" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { Endpoint string DefaultEndpoint string + DefaultEndpointTemplate string DefaultMTLSEndpoint string Scopes []string DefaultScopes []string @@ -46,7 +55,11 @@ type DialSettings struct { SkipValidation bool ImpersonationConfig *impersonate.Config EnableDirectPath bool + EnableDirectPathXds bool + EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool + UniverseDomain string + DefaultUniverseDomain string // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -76,6 +89,16 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + return b + } + return false +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { @@ -140,3 +163,66 @@ func (ds *DialSettings) Validate() error { } return nil } + +// GetDefaultUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with internaloption.WithDefaultUniverseDomain. +// The default value is "googleapis.com". +func (ds *DialSettings) GetDefaultUniverseDomain() string { + if ds.DefaultUniverseDomain == "" { + return universeDomainDefault + } + return ds.DefaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with option.WithUniverseDomain. +// The default value is the value of GetDefaultUniverseDomain, as configured +// with internaloption.WithDefaultUniverseDomain. +func (ds *DialSettings) GetUniverseDomain() string { + if ds.UniverseDomain == "" { + return ds.GetDefaultUniverseDomain() + } + return ds.UniverseDomain +} + +func (ds *DialSettings) IsUniverseDomainGDU() bool { + return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, from google.Credentials, for comparison with the value returned by +// (*DialSettings).GetUniverseDomain. This wrapper function should be removed +// to close [TODO(chrisdsmith): issue link here]. See details below. +func GetUniverseDomain(creds *google.Credentials) (string, error) { + timer := time.NewTimer(time.Second) + defer timer.Stop() + errors := make(chan error) + results := make(chan string) + + go func() { + result, err := creds.GetUniverseDomain() + if err != nil { + errors <- err + return + } + results <- result + }() + + select { + case err := <-errors: + // An error that is returned before the timer expires is legitimate. + return "", err + case res := <-results: + return res, nil + case <-timer.C: // Timer is expired. + // If err or res was not returned, it means that creds.GetUniverseDomain() + // did not complete in 1s. Assume that MDS is likely never responding to + // the endpoint and will timeout. This is the source of issues such as + // https://github.com/googleapis/google-cloud-go/issues/9350. + // Temporarily (2024-02-02) return the GDU domain. Restore the original + // calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go + // and remove this method to close + // https://github.com/googleapis/google-api-go-client/issues/2399. + return universeDomainDefault, nil + } +} diff --git a/event-exporter/vendor/google.golang.org/api/internal/version.go b/event-exporter/vendor/google.golang.org/api/internal/version.go index 24d6116a1..60852aed6 100644 --- a/event-exporter/vendor/google.golang.org/api/internal/version.go +++ b/event-exporter/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
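The universe-domain helpers above come together at dial time roughly as follows; this is a sketch of the comparison only, not the exact wiring in grpc/dial.go and http/dial.go:

func validateUniverseDomain(ds *DialSettings, creds *google.Credentials) error {
	credsUD, err := GetUniverseDomain(creds) // guarded by the 1s timer above
	if err != nil {
		return err
	}
	if ds.GetUniverseDomain() != credsUD {
		return ErrUniverseNotMatch(ds.GetUniverseDomain(), credsUD)
	}
	return nil
}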
-const Version = "0.70.0" +const Version = "0.167.0" diff --git a/event-exporter/vendor/google.golang.org/api/logging/v2/logging-api.json b/event-exporter/vendor/google.golang.org/api/logging/v2/logging-api.json index 259dbb9f0..5dc5b737f 100644 --- a/event-exporter/vendor/google.golang.org/api/logging/v2/logging-api.json +++ b/event-exporter/vendor/google.golang.org/api/logging/v2/logging-api.json @@ -27,6 +27,13 @@ "description": "Writes log entries and manages your Cloud Logging configuration.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/logging/docs/", + "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://logging.me-central2.rep.googleapis.com/", + "location": "me-central2" + } + ], "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -120,7 +127,7 @@ "billingAccounts": { "methods": { "getCmekSettings": { - "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/billingAccounts/{billingAccountsId}/cmekSettings", "httpMethod": "GET", "id": "logging.billingAccounts.getCmekSettings", @@ -129,7 +136,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -148,7 +155,7 @@ ] }, "getSettings": { - "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. 
Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.", "flatPath": "v2/billingAccounts/{billingAccountsId}/settings", "httpMethod": "GET", "id": "logging.billingAccounts.getSettings", @@ -157,7 +164,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, @@ -177,72 +184,6 @@ } }, "resources": { - "buckets": { - "methods": { - "get": { - "description": "Gets a log bucket.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/buckets/{bucketsId}", - "httpMethod": "GET", - "id": "logging.billingAccounts.buckets.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", - "location": "path", - "pattern": "^billingAccounts/[^/]+/buckets/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}", - "response": { - "$ref": "LogBucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ] - } - }, - "resources": { - "views": { - "methods": { - "get": { - "description": "Gets a view on a log bucket..", - "flatPath": "v2/billingAccounts/{billingAccountsId}/buckets/{bucketsId}/views/{viewsId}", - "httpMethod": "GET", - "id": "logging.billingAccounts.buckets.views.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The resource name of the policy: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", - "location": "path", - "pattern": "^billingAccounts/[^/]+/buckets/[^/]+/views/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}", - "response": { - "$ref": "LogView" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ] - } - } - } - } - }, "exclusions": { "methods": { "create": { @@ -516,6 +457,40 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "createAsync": { + "description": "Creates a log bucket asynchronously that can be used to store log entries.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets:createAsync", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.buckets.createAsync", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets:createAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "delete": { "description": "Deletes a log bucket.Changes the bucket's lifecycle_state to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all log entries in the bucket will be permanently deleted.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", @@ -542,6 +517,34 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Gets a log bucket.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.buckets.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { "description": "Lists log buckets.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets", @@ -582,7 +585,7 @@ ] }, "patch": { - "description": "Updates a log bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "description": "Updates a log bucket.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}", "httpMethod": "PATCH", "id": "logging.billingAccounts.locations.buckets.patch", @@ -644,39 +647,74 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "updateAsync": { + "description": "Updates a log bucket asynchronously.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}:updateAsync", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.buckets.updateAsync", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}:updateAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } }, "resources": { - "views": { + "links": { "methods": { "create": { - "description": "Creates a view over log entries in a log bucket. A bucket may contain a maximum of 30 views.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", - "id": "logging.billingAccounts.locations.buckets.views.create", + "id": "logging.billingAccounts.locations.buckets.links.create", "parameterOrder": [ "parent" ], "parameters": { + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "location": "query", + "type": "string" + }, "parent": { - "description": "Required. The bucket in which to create the view `\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"` For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", "location": "path", "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, "type": "string" - }, - "viewId": { - "description": "Required. The id to use for this view.", - "location": "query", - "type": "string" } }, - "path": "v2/{+parent}/views", + "path": "v2/{+parent}/links", "request": { - "$ref": "LogView" + "$ref": "Link" }, "response": { - "$ref": "LogView" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -684,62 +722,90 @@ ] }, "delete": { - "description": "Deletes a view on a log bucket. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can delete the view. If this occurs, please try again in a few minutes.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", + "description": "Deletes a link. This will also delete the corresponding BigQuery linked dataset.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", "httpMethod": "DELETE", - "id": "logging.billingAccounts.locations.buckets.views.delete", + "id": "logging.billingAccounts.locations.buckets.links.delete", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. 
The full resource name of the view to delete: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", + "description": "Required. The full resource name of the link to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", "location": "path", - "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", "required": true, "type": "string" } }, "path": "v2/{+name}", "response": { - "$ref": "Empty" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Gets a link.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.buckets.links.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the link: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Link" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { - "description": "Lists views on a log bucket.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views", + "description": "Lists links.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "GET", - "id": "logging.billingAccounts.locations.buckets.views.list", + "id": "logging.billingAccounts.locations.buckets.links.list", "parameterOrder": [ "parent" ], "parameters": { "pageSize": { - "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "description": "Optional. The maximum number of results to return from this request.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "description": "Optional. 
If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response.", "location": "query", "type": "string" }, "parent": { - "description": "Required. The bucket whose views are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "description": "Required. The parent resource whose links are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", "location": "path", "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, "type": "string" } }, - "path": "v2/{+parent}/views", + "path": "v2/{+parent}/links", "response": { - "$ref": "ListViewsResponse" + "$ref": "ListLinksResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -747,31 +813,34 @@ "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read" ] - }, - "patch": { - "description": "Updates a view on a log bucket. This method replaces the following fields in the existing view with values from the new view: filter. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", - "httpMethod": "PATCH", - "id": "logging.billingAccounts.locations.buckets.views.patch", + } + } + }, + "views": { + "methods": { + "create": { + "description": "Creates a view over log entries in a log bucket. A bucket may contain a maximum of 30 views.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.buckets.views.create", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "name": { - "description": "Required. The full resource name of the view to update \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", + "parent": { + "description": "Required. The bucket in which to create the view `\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\"` For example:\"projects/my-project/locations/global/buckets/my-bucket\"", "location": "path", - "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, "type": "string" }, - "updateMask": { - "description": "Optional. Field mask that specifies the fields in view that need an update. A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=filter", - "format": "google-fieldmask", + "viewId": { + "description": "Required. A client-assigned identifier such as \"my-view\". 
Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", "location": "query", "type": "string" } }, - "path": "v2/{+name}", + "path": "v2/{+parent}/views", "request": { "$ref": "LogView" }, @@ -782,44 +851,251 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] - } - } - } - } - }, - "operations": { - "methods": { - "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations/{operationsId}:cancel", - "httpMethod": "POST", - "id": "logging.billingAccounts.locations.operations.cancel", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", - "location": "path", - "pattern": "^billingAccounts/[^/]+/locations/[^/]+/operations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}:cancel", - "request": { - "$ref": "CancelOperationRequest" - }, - "response": { - "$ref": "Empty" + }, + "delete": { + "description": "Deletes a view on a log bucket. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can delete the view. If this occurs, please try again in a few minutes.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", + "httpMethod": "DELETE", + "id": "logging.billingAccounts.locations.buckets.views.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the view to delete: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a view on a log bucket.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.buckets.views.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the policy: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "LogView" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists views on a log bucket.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.buckets.views.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The bucket whose views are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/views", + "response": { + "$ref": "ListViewsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "patch": { + "description": "Updates a view on a log bucket. This method replaces the value of the filter field from the existing view with the corresponding value from the new view. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", + "httpMethod": "PATCH", + "id": "logging.billingAccounts.locations.buckets.views.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the view to update \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket/views/my-view\"", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. Field mask that specifies the fields in view that need an update. A field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=filter", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "LogView" + }, + "response": { + "$ref": "LogView" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + }, + "resources": { + "logs": { + "methods": { + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}/logs", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.buckets.views.logs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "resourceNames": { + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", + "location": "query", + "repeated": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logs", + "response": { + "$ref": "ListLogsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + } + } + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "logging.billingAccounts.locations.operations.list", @@ -863,6 +1139,152 @@ ] } } + }, + "recentQueries": { + "methods": { + "list": { + "description": "Lists the RecentQueries that were created by the user making the request.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recentQueries", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.recentQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example:projects/my-project/locations/us-central1Note: The location portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all recent queries.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/recentQueries", + "response": { + "$ref": "ListRecentQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, + "savedQueries": { + "methods": { + "create": { + "description": "Creates a new SavedQuery for the user making the request.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.savedQueries.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource in which to create the saved query: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/global\" \"organizations/123456789/locations/us-central1\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "savedQueryId": { + "description": "Optional. 
The ID to use for the saved query, which will become the final component of the saved query's resource name.If the saved_query_id is not provided, the system will generate an alphanumeric ID.The saved_query_id is limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, periods.First character has to be alphanumeric.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+parent}/savedQueries", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes an existing SavedQuery that was created by the user making the request.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "DELETE", + "id": "logging.billingAccounts.locations.savedQueries.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the saved query to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "list": { + "description": "Lists the SavedQueries that were created by the user making the request.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/savedQueries", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.savedQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/us-central1\" Note: The locations portion of the resource must be specified. 
To get a list of all saved queries, a wildcard character - can be used for LOCATION_ID, for example: \"projects/my-project/locations/-\" ", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/savedQueries", + "response": { + "$ref": "ListSavedQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -915,14 +1337,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -941,42 +1363,10 @@ } } }, - "operations": { - "methods": { - "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "flatPath": "v2/billingAccounts/{billingAccountsId}/operations/{operationsId}", - "httpMethod": "GET", - "id": "logging.billingAccounts.operations.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The name of the operation resource.", - "location": "path", - "pattern": "^billingAccounts/[^/]+/operations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ] - } - } - }, "sinks": { "methods": { "create": { - "description": "Creates a sink that exports specified log entries to a destination. 
The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "description": "Creates a sink that exports specified log entries to a destination. The export begins upon ingress, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks", "httpMethod": "POST", "id": "logging.billingAccounts.sinks.create", @@ -984,6 +1374,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -992,7 +1387,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) used by the sinks with the same parent. For more information, see writer_identity in LogSink.", "location": "query", "type": "boolean" } @@ -1103,7 +1498,7 @@ ] }, "patch": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. 
This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", "httpMethod": "PATCH", "id": "logging.billingAccounts.sinks.patch", @@ -1111,6 +1506,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -1119,7 +1519,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -1143,7 +1543,7 @@ ] }, "update": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}", "httpMethod": "PUT", "id": "logging.billingAccounts.sinks.update", @@ -1151,6 +1551,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. 
This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -1159,7 +1564,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -1229,7 +1634,7 @@ ] }, "tail": { - "description": "Streaming read of log entries as they are ingested. Until the stream is terminated, it will continue reading logs.", + "description": "Streaming read of log entries as they are received. Until the stream is terminated, it will continue reading logs.", "flatPath": "v2/entries:tail", "httpMethod": "POST", "id": "logging.entries.tail", @@ -1250,7 +1655,7 @@ ] }, "write": { - "description": "Writes log entries to Logging. This API method is the only way to send log entries to Logging. This method is used, directly or indirectly, by the Logging agent (fluentd) and all logging libraries configured to use Logging. A single request may contain log entries for a maximum of 1000 different resources (projects, organizations, billing accounts or folders)", + "description": "Writes log entries to Logging. This API method is the only way to send log entries to Logging. This method is used, directly or indirectly, by the Logging agent (fluentd) and all logging libraries configured to use Logging. 
A single request may contain log entries for a maximum of 1000 different resource names (projects, organizations, billing accounts or folders), where the resource name for a log entry is determined from its logName field.", "flatPath": "v2/entries:write", "httpMethod": "POST", "id": "logging.entries.write", @@ -1435,7 +1840,7 @@ "folders": { "methods": { "getCmekSettings": { - "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/folders/{foldersId}/cmekSettings", "httpMethod": "GET", "id": "logging.folders.getCmekSettings", @@ -1444,7 +1849,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1463,7 +1868,7 @@ ] }, "getSettings": { - "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.", "flatPath": "v2/folders/{foldersId}/settings", "httpMethod": "GET", "id": "logging.folders.getSettings", @@ -1472,7 +1877,7 @@ ], "parameters": { "name": { - "description": "Required. 
The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1491,7 +1896,7 @@ ] }, "updateSettings": { - "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings fails when any of the following are true: The value of storage_location either isn't supported by Logging or violates the location OrgPolicy. The default_sink_config field is set, but it has an unspecified filter write mode. The value of kms_key_name is invalid. The associated service account doesn't have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key. Access to the key is disabled.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.", "flatPath": "v2/folders/{foldersId}/settings", "httpMethod": "PATCH", "id": "logging.folders.updateSettings", @@ -1500,7 +1905,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource name for the settings to update. 
\"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^folders/[^/]+$", "required": true, @@ -1800,6 +2205,40 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "createAsync": { + "description": "Creates a log bucket asynchronously that can be used to store log entries.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets:createAsync", + "httpMethod": "POST", + "id": "logging.folders.locations.buckets.createAsync", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets:createAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "delete": { "description": "Deletes a log bucket.Changes the bucket's lifecycle_state to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all log entries in the bucket will be permanently deleted.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", @@ -1894,7 +2333,7 @@ ] }, "patch": { - "description": "Updates a log bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "description": "Updates a log bucket.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}", "httpMethod": "PATCH", "id": "logging.folders.locations.buckets.patch", @@ -1916,41 +2355,76 @@ "type": "string" } }, - "path": "v2/{+name}", + "path": "v2/{+name}", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "undelete": { + "description": "Undeletes a log bucket. A bucket that has been deleted can be undeleted within the grace period of 7 days.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.folders.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", "request": { - "$ref": "LogBucket" + "$ref": "UndeleteBucketRequest" }, "response": { - "$ref": "LogBucket" + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] }, - "undelete": { - "description": "Undeletes a log bucket. A bucket that has been deleted can be undeleted within the grace period of 7 days.", - "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "updateAsync": { + "description": "Updates a log bucket asynchronously.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}:updateAsync", "httpMethod": "POST", - "id": "logging.folders.locations.buckets.undelete", + "id": "logging.folders.locations.buckets.updateAsync", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", "location": "path", "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. 
name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, - "path": "v2/{+name}:undelete", + "path": "v2/{+name}:updateAsync", "request": { - "$ref": "UndeleteBucketRequest" + "$ref": "LogBucket" }, "response": { - "$ref": "Empty" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -1959,6 +2433,137 @@ } }, "resources": { + "links": { + "methods": { + "create": { + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "POST", + "id": "logging.folders.locations.buckets.links.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "request": { + "$ref": "Link" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a link. This will also delete the corresponding BigQuery linked dataset.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "DELETE", + "id": "logging.folders.locations.buckets.links.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the link to delete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a link.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "GET", + "id": "logging.folders.locations.buckets.links.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the link: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Link" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists links.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "GET", + "id": "logging.folders.locations.buckets.links.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource whose links are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "response": { + "$ref": "ListLinksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, "views": { "methods": { "create": { @@ -1978,7 +2583,7 @@ "type": "string" }, "viewId": { - "description": "Required. 
The id to use for this view.", + "description": "Required. A client-assigned identifier such as \"my-view\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", "location": "query", "type": "string" } @@ -2022,7 +2627,7 @@ ] }, "get": { - "description": "Gets a view on a log bucket..", + "description": "Gets a view on a log bucket.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "GET", "id": "logging.folders.locations.buckets.views.get", @@ -2089,7 +2694,7 @@ ] }, "patch": { - "description": "Updates a view on a log bucket. This method replaces the following fields in the existing view with values from the new view: filter. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", + "description": "Updates a view on a log bucket. This method replaces the value of the filter field from the existing view with the corresponding value from the new view. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "PATCH", "id": "logging.folders.locations.buckets.views.patch", @@ -2123,6 +2728,57 @@ "https://www.googleapis.com/auth/logging.admin" ] } + }, + "resources": { + "logs": { + "methods": { + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}/logs", + "httpMethod": "GET", + "id": "logging.folders.locations.buckets.views.logs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "resourceNames": { + "description": "Optional. 
List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", + "location": "query", + "repeated": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logs", + "response": { + "$ref": "ListLogsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + } } } } @@ -2187,41 +2843,187 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/operations", "httpMethod": "GET", - "id": "logging.folders.locations.operations.list", + "id": "logging.folders.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, + "recentQueries": { + "methods": { + "list": { + "description": "Lists the RecentQueries that were created by the user making the request.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/recentQueries", + "httpMethod": "GET", + "id": "logging.folders.locations.recentQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example:projects/my-project/locations/us-central1Note: The location portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all recent queries.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/recentQueries", + "response": { + "$ref": "ListRecentQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, + "savedQueries": { + "methods": { + "create": { + "description": "Creates a new SavedQuery for the user making the request.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries", + "httpMethod": "POST", + "id": "logging.folders.locations.savedQueries.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource in which to create the saved query: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/global\" \"organizations/123456789/locations/us-central1\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "savedQueryId": { + "description": "Optional. The ID to use for the saved query, which will become the final component of the saved query's resource name.If the saved_query_id is not provided, the system will generate an alphanumeric ID.The saved_query_id is limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, periods.First character has to be alphanumeric.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+parent}/savedQueries", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes an existing SavedQuery that was created by the user making the request.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "DELETE", + "id": "logging.folders.locations.savedQueries.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The full resource name of the saved query to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "list": { + "description": "Lists the SavedQueries that were created by the user making the request.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/savedQueries", + "httpMethod": "GET", + "id": "logging.folders.locations.savedQueries.list", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" - }, - "name": { - "description": "The name of the operation's parent resource.", - "location": "path", - "pattern": "^folders/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - }, "pageSize": { - "description": "The standard list page size.", + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The standard list page token.", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", "location": "query", "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/us-central1\" Note: The locations portion of the resource must be specified. To get a list of all saved queries, a wildcard character - can be used for LOCATION_ID, for example: \"projects/my-project/locations/-\" ", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" } }, - "path": "v2/{+name}/operations", + "path": "v2/{+parent}/savedQueries", "response": { - "$ref": "ListOperationsResponse" + "$ref": "ListSavedQueriesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -2283,14 +3085,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^folders/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -2312,7 +3114,7 @@ "sinks": { "methods": { "create": { - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "description": "Creates a sink that exports specified log entries to a destination. The export begins upon ingress, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "flatPath": "v2/folders/{foldersId}/sinks", "httpMethod": "POST", "id": "logging.folders.sinks.create", @@ -2320,6 +3122,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -2328,7 +3135,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. 
The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) used by the sinks with the same parent. For more information, see writer_identity in LogSink.", "location": "query", "type": "boolean" } @@ -2439,7 +3246,7 @@ ] }, "patch": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", "httpMethod": "PATCH", "id": "logging.folders.sinks.patch", @@ -2447,6 +3254,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -2455,7 +3267,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -2479,7 +3291,7 @@ ] }, "update": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}", "httpMethod": "PUT", "id": "logging.folders.sinks.update", @@ -2487,6 +3299,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -2495,7 +3312,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. 
It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -2634,6 +3451,40 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "createAsync": { + "description": "Creates a log bucket asynchronously that can be used to store log entries.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets:createAsync", + "httpMethod": "POST", + "id": "logging.locations.buckets.createAsync", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets:createAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "delete": { "description": "Deletes a log bucket.Changes the bucket's lifecycle_state to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all log entries in the bucket will be permanently deleted.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", @@ -2728,7 +3579,7 @@ ] }, "patch": { - "description": "Updates a log bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "description": "Updates a log bucket.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}", "httpMethod": "PATCH", "id": "logging.locations.buckets.patch", @@ -2790,9 +3641,175 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] + }, + "updateAsync": { + "description": "Updates a log bucket asynchronously.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}:updateAsync", + "httpMethod": "POST", + "id": "logging.locations.buckets.updateAsync", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to update. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}:updateAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] } }, "resources": { + "links": { + "methods": { + "create": { + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "POST", + "id": "logging.locations.buckets.links.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "request": { + "$ref": "Link" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a link. This will also delete the corresponding BigQuery linked dataset.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "DELETE", + "id": "logging.locations.buckets.links.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the link to delete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a link.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "GET", + "id": "logging.locations.buckets.links.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the link: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Link" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists links.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "GET", + "id": "logging.locations.buckets.links.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource whose links are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "response": { + "$ref": "ListLinksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, "views": { "methods": { "create": { @@ -2812,7 +3829,7 @@ "type": "string" }, "viewId": { - "description": "Required. 
The id to use for this view.", + "description": "Required. A client-assigned identifier such as \"my-view\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", "location": "query", "type": "string" } @@ -2856,7 +3873,7 @@ ] }, "get": { - "description": "Gets a view on a log bucket..", + "description": "Gets a view on a log bucket.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "GET", "id": "logging.locations.buckets.views.get", @@ -2923,7 +3940,7 @@ ] }, "patch": { - "description": "Updates a view on a log bucket. This method replaces the following fields in the existing view with values from the new view: filter. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", + "description": "Updates a view on a log bucket. This method replaces the value of the filter field from the existing view with the corresponding value from the new view. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "PATCH", "id": "logging.locations.buckets.views.patch", @@ -3021,7 +4038,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "logging.locations.operations.list", @@ -3117,14 +4134,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. 
List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -3180,7 +4197,7 @@ "organizations": { "methods": { "getCmekSettings": { - "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/organizations/{organizationsId}/cmekSettings", "httpMethod": "GET", "id": "logging.organizations.getCmekSettings", @@ -3189,7 +4206,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -3208,7 +4225,7 @@ ] }, "getSettings": { - "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. 
Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.", "flatPath": "v2/organizations/{organizationsId}/settings", "httpMethod": "GET", "id": "logging.organizations.getSettings", @@ -3217,7 +4234,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -3236,7 +4253,7 @@ ] }, "updateCmekSettings": { - "description": "Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings fails when any of the following are true: The value of kms_key_name is invalid. The associated service account doesn't have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key. Access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/organizations/{organizationsId}/cmekSettings", "httpMethod": "PATCH", "id": "logging.organizations.updateCmekSettings", @@ -3271,7 +4288,7 @@ ] }, "updateSettings": { - "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. 
Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings fails when any of the following are true: The value of storage_location either isn't supported by Logging or violates the location OrgPolicy. The default_sink_config field is set, but it has an unspecified filter write mode. The value of kms_key_name is invalid. The associated service account doesn't have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key. Access to the key is disabled.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.", "flatPath": "v2/organizations/{organizationsId}/settings", "httpMethod": "PATCH", "id": "logging.organizations.updateSettings", @@ -3280,7 +4297,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -3580,6 +4597,40 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "createAsync": { + "description": "Creates a log bucket asynchronously that can be used to store log entries.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets:createAsync", + "httpMethod": "POST", + "id": "logging.organizations.locations.buckets.createAsync", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets:createAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "delete": { "description": "Deletes a log bucket.Changes the bucket's lifecycle_state to the DELETE_REQUESTED state. 
After 7 days, the bucket will be purged and all log entries in the bucket will be permanently deleted.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", @@ -3674,7 +4725,7 @@ ] }, "patch": { - "description": "Updates a log bucket. This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "description": "Updates a log bucket.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}", "httpMethod": "PATCH", "id": "logging.organizations.locations.buckets.patch", @@ -3696,41 +4747,76 @@ "type": "string" } }, - "path": "v2/{+name}", + "path": "v2/{+name}", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "LogBucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "undelete": { + "description": "Undeletes a log bucket. A bucket that has been deleted can be undeleted within the grace period of 7 days.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "httpMethod": "POST", + "id": "logging.organizations.locations.buckets.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to undelete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", "request": { - "$ref": "LogBucket" + "$ref": "UndeleteBucketRequest" }, "response": { - "$ref": "LogBucket" + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] }, - "undelete": { - "description": "Undeletes a log bucket. A bucket that has been deleted can be undeleted within the grace period of 7 days.", - "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}:undelete", + "updateAsync": { + "description": "Updates a log bucket asynchronously.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}:updateAsync", "httpMethod": "POST", - "id": "logging.organizations.locations.buckets.undelete", + "id": "logging.organizations.locations.buckets.updateAsync", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "Required. The full resource name of the bucket to undelete. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", "location": "path", "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", "required": true, "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, - "path": "v2/{+name}:undelete", + "path": "v2/{+name}:updateAsync", "request": { - "$ref": "UndeleteBucketRequest" + "$ref": "LogBucket" }, "response": { - "$ref": "Empty" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -3739,6 +4825,137 @@ } }, "resources": { + "links": { + "methods": { + "create": { + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "POST", + "id": "logging.organizations.locations.buckets.links.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "request": { + "$ref": "Link" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a link. 
This will also delete the corresponding BigQuery linked dataset.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "DELETE", + "id": "logging.organizations.locations.buckets.links.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the link to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a link.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "GET", + "id": "logging.organizations.locations.buckets.links.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the link: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Link" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists links.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "GET", + "id": "logging.organizations.locations.buckets.links.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The parent resource whose links are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "response": { + "$ref": "ListLinksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, "views": { "methods": { "create": { @@ -3758,7 +4975,7 @@ "type": "string" }, "viewId": { - "description": "Required. The id to use for this view.", + "description": "Required. A client-assigned identifier such as \"my-view\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", "location": "query", "type": "string" } @@ -3802,7 +5019,7 @@ ] }, "get": { - "description": "Gets a view on a log bucket..", + "description": "Gets a view on a log bucket.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "GET", "id": "logging.organizations.locations.buckets.views.get", @@ -3869,7 +5086,7 @@ ] }, "patch": { - "description": "Updates a view on a log bucket. This method replaces the following fields in the existing view with values from the new view: filter. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", + "description": "Updates a view on a log bucket. This method replaces the value of the filter field from the existing view with the corresponding value from the new view. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "PATCH", "id": "logging.organizations.locations.buckets.views.patch", @@ -3903,105 +5120,302 @@ "https://www.googleapis.com/auth/logging.admin" ] } - } + }, + "resources": { + "logs": { + "methods": { + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}/logs", + "httpMethod": "GET", + "id": "logging.organizations.locations.buckets.views.logs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "resourceNames": { + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", + "location": "query", + "repeated": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logs", + "response": { + "$ref": "ListLogsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + } + } + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.organizations.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.organizations.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.organizations.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, + "recentQueries": { + "methods": { + "list": { + "description": "Lists the RecentQueries that were created by the user making the request.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/recentQueries", + "httpMethod": "GET", + "id": "logging.organizations.locations.recentQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example:projects/my-project/locations/us-central1Note: The location portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all recent queries.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/recentQueries", + "response": { + "$ref": "ListRecentQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] } } }, - "operations": { + "savedQueries": { "methods": { - "cancel": { - "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", - "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "create": { + "description": "Creates a new SavedQuery for the user making the request.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries", "httpMethod": "POST", - "id": "logging.organizations.locations.operations.cancel", + "id": "logging.organizations.locations.savedQueries.create", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "name": { - "description": "The name of the operation resource to be cancelled.", + "parent": { + "description": "Required. The parent resource in which to create the saved query: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/global\" \"organizations/123456789/locations/us-central1\" ", "location": "path", - "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "pattern": "^organizations/[^/]+/locations/[^/]+$", "required": true, "type": "string" + }, + "savedQueryId": { + "description": "Optional. 
The ID to use for the saved query, which will become the final component of the saved query's resource name.If the saved_query_id is not provided, the system will generate an alphanumeric ID.The saved_query_id is limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, periods.First character has to be alphanumeric.", + "location": "query", + "type": "string" } }, - "path": "v2/{+name}:cancel", + "path": "v2/{+parent}/savedQueries", "request": { - "$ref": "CancelOperationRequest" + "$ref": "SavedQuery" }, "response": { - "$ref": "Empty" + "$ref": "SavedQuery" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/logging.admin" ] }, - "get": { - "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", - "httpMethod": "GET", - "id": "logging.organizations.locations.operations.get", + "delete": { + "description": "Deletes an existing SavedQuery that was created by the user making the request.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "DELETE", + "id": "logging.organizations.locations.savedQueries.delete", "parameterOrder": [ "name" ], "parameters": { "name": { - "description": "The name of the operation resource.", + "description": "Required. The full resource name of the saved query to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", "location": "path", - "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "pattern": "^organizations/[^/]+/locations/[^/]+/savedQueries/[^/]+$", "required": true, "type": "string" } }, "path": "v2/{+name}", "response": { - "$ref": "Operation" + "$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" + "https://www.googleapis.com/auth/logging.admin" ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
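
The savedQueryId constraints just quoted (at most 100 characters, first character alphanumeric, only alphanumerics, underscores, hyphens, and periods) shape the create call. A sketch against the generated client, reusing an svc constructed as in the polling example above; the parent, ID, and filter are hypothetical, and the SavedQuery/LoggingQuery field names are assumptions about this version's generated structs:

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func createSavedQuery(ctx context.Context, svc *logging.Service) error {
        sq := &logging.SavedQuery{
            DisplayName: "errors-last-hour", // hypothetical
            LoggingQuery: &logging.LoggingQuery{
                Filter: `severity>=ERROR`, // hypothetical Logging filter
            },
        }
        created, err := svc.Organizations.Locations.SavedQueries.
            Create("organizations/123456789/locations/global", sq).
            SavedQueryId("errors-last-hour"). // omit to let the service mint an ID
            Context(ctx).Do()
        if err != nil {
            return err
        }
        fmt.Println("created:", created.Name)
        return nil
    }
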
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", - "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations", + "description": "Lists the SavedQueries that were created by the user making the request.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/savedQueries", "httpMethod": "GET", - "id": "logging.organizations.locations.operations.list", + "id": "logging.organizations.locations.savedQueries.list", "parameterOrder": [ - "name" + "parent" ], "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" - }, - "name": { - "description": "The name of the operation's parent resource.", - "location": "path", - "pattern": "^organizations/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - }, "pageSize": { - "description": "The standard list page size.", + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The standard list page token.", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", "location": "query", "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/us-central1\" Note: The locations portion of the resource must be specified. To get a list of all saved queries, a wildcard character - can be used for LOCATION_ID, for example: \"projects/my-project/locations/-\" ", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" } }, - "path": "v2/{+name}/operations", + "path": "v2/{+parent}/savedQueries", "response": { - "$ref": "ListOperationsResponse" + "$ref": "ListSavedQueriesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -4063,14 +5477,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -4092,7 +5506,7 @@ "sinks": { "methods": { "create": { - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "description": "Creates a sink that exports specified log entries to a destination. The export begins upon ingress, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "flatPath": "v2/organizations/{organizationsId}/sinks", "httpMethod": "POST", "id": "logging.organizations.sinks.create", @@ -4100,6 +5514,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -4108,7 +5527,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "description": "Optional. 
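
The reworded resourceNames description above now admits log views alongside the legacy parent forms, and notes that the parent itself is implicitly included in the list. A sketch of organizations.logs.list with a bucket view, reusing svc from the first example (the organization ID and view path are hypothetical):

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func listOrgLogs(ctx context.Context, svc *logging.Service) error {
        return svc.Organizations.Logs.List("organizations/123456789").
            // The parent resource is added to this list by the service.
            ResourceNames("organizations/123456789/locations/global/buckets/_Default/views/_AllLogs").
            Pages(ctx, func(resp *logging.ListLogsResponse) error {
                for _, name := range resp.LogNames {
                    fmt.Println(name)
                }
                return nil
            })
    }
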
Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) used by the sinks with the same parent. For more information, see writer_identity in LogSink.", "location": "query", "type": "boolean" } @@ -4219,7 +5638,7 @@ ] }, "patch": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", "httpMethod": "PATCH", "id": "logging.organizations.sinks.patch", @@ -4227,6 +5646,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -4235,7 +5659,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. 
It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -4259,7 +5683,7 @@ ] }, "update": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}", "httpMethod": "PUT", "id": "logging.organizations.sinks.update", @@ -4267,6 +5691,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -4275,7 +5704,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -4305,7 +5734,7 @@ "projects": { "methods": { "getCmekSettings": { - "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. 
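
Taken together, the customWriterIdentity and uniqueWriterIdentity parameters above decide which identity lands in the sink's writer_identity. A create sketch reusing svc from the first example; the organization ID, sink name, destination, and filter are hypothetical, and the CustomWriterIdentity setter is assumed from the customWriterIdentity query parameter this version adds:

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func createOrgSink(ctx context.Context, svc *logging.Service) error {
        sink := &logging.LogSink{
            Name:        "my-sink",
            Destination: "storage.googleapis.com/my-export-bucket",
            Filter:      `severity>=WARNING`,
        }
        created, err := svc.Organizations.Sinks.
            Create("organizations/123456789", sink).
            // Alternatively, for cross-project destinations:
            // CustomWriterIdentity("serviceAccount:sa@my-project.iam.gserviceaccount.com")
            UniqueWriterIdentity(true).
            Context(ctx).Do()
        if err != nil {
            return err
        }
        // Grant this identity write access on the destination, or exports stall.
        fmt.Println("writer identity:", created.WriterIdentity)
        return nil
    }
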
Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/projects/{projectsId}/cmekSettings", "httpMethod": "GET", "id": "logging.projects.getCmekSettings", @@ -4314,7 +5743,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -4333,7 +5762,7 @@ ] }, "getSettings": { - "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.", "flatPath": "v2/projects/{projectsId}/settings", "httpMethod": "GET", "id": "logging.projects.getSettings", @@ -4342,7 +5771,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. 
The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -4635,6 +6064,40 @@ "https://www.googleapis.com/auth/logging.admin" ] }, + "createAsync": { + "description": "Creates a log bucket asynchronously that can be used to store log entries.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets:createAsync", + "httpMethod": "POST", + "id": "logging.projects.locations.buckets.createAsync", + "parameterOrder": [ + "parent" + ], + "parameters": { + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/buckets:createAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "delete": { "description": "Deletes a log bucket.Changes the bucket's lifecycle_state to the DELETE_REQUESTED state. After 7 days, the bucket will be purged and all log entries in the bucket will be permanently deleted.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", @@ -4729,7 +6192,7 @@ ] }, "patch": { - "description": "Updates a log bucket. 
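
Reading the CMEK or default settings is a plain GET on the *Settings resource names shown above. A project-scoped sketch, reusing svc from the first example (the project ID is hypothetical; per the note, the same call shape applies to folders, organizations, and billing accounts):

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func showCmekSettings(ctx context.Context, svc *logging.Service) error {
        cmek, err := svc.Projects.GetCmekSettings("projects/my-project").Context(ctx).Do()
        if err != nil {
            return err
        }
        // ServiceAccountId is the account that must hold
        // roles/cloudkms.cryptoKeyEncrypterDecrypter on the key.
        fmt.Println("key:", cmek.KmsKeyName, "service account:", cmek.ServiceAccountId)
        return nil
    }
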
This method replaces the following fields in the existing bucket with values from the new bucket: retention_periodIf the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be returned.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "description": "Updates a log bucket.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", "httpMethod": "PATCH", "id": "logging.projects.locations.buckets.patch", @@ -4779,21 +6242,187 @@ "required": true, "type": "string" } - }, - "path": "v2/{+name}:undelete", - "request": { - "$ref": "UndeleteBucketRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ] - } - }, - "resources": { + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteBucketRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "updateAsync": { + "description": "Updates a log bucket asynchronously.If the bucket has a lifecycle_state of DELETE_REQUESTED, then FAILED_PRECONDITION will be returned.After a bucket has been created, the bucket's location cannot be changed.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}:updateAsync", + "httpMethod": "POST", + "id": "logging.projects.locations.buckets.updateAsync", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}:updateAsync", + "request": { + "$ref": "LogBucket" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + }, + "resources": { + "links": { + "methods": { + "create": { + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. 
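
Unlike buckets.create, the createAsync variant added here returns an Operation to poll rather than the finished bucket. A sketch reusing svc from the first example (project, location, bucket ID, and retention are hypothetical); the returned operation can be polled as in the earlier operations.get sketch:

    // imports: "context", logging "google.golang.org/api/logging/v2"
    func createBucketAsync(ctx context.Context, svc *logging.Service) (*logging.Operation, error) {
        bucket := &logging.LogBucket{RetentionDays: 90} // hypothetical retention
        return svc.Projects.Locations.Buckets.
            CreateAsync("projects/my-project/locations/global", bucket).
            BucketId("my-bucket"). // letters, digits, underscores, hyphens, periods
            Context(ctx).Do()
    }
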
A log bucket may currently only contain one link.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "POST", + "id": "logging.projects.locations.buckets.links.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "request": { + "$ref": "Link" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes a link. This will also delete the corresponding BigQuery linked dataset.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "DELETE", + "id": "logging.projects.locations.buckets.links.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the link to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets a link.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links/{linksId}", + "httpMethod": "GET", + "id": "logging.projects.locations.buckets.links.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the link: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/links/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Link" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists links.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links", + "httpMethod": "GET", + "id": "logging.projects.locations.buckets.links.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource whose links are to be listed: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/links", + "response": { + "$ref": "ListLinksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, "views": { "methods": { "create": { @@ -4813,7 +6442,7 @@ "type": "string" }, "viewId": { - "description": "Required. The id to use for this view.", + "description": "Required. A client-assigned identifier such as \"my-view\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", "location": "query", "type": "string" } @@ -4857,7 +6486,7 @@ ] }, "get": { - "description": "Gets a view on a log bucket..", + "description": "Gets a view on a log bucket.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "GET", "id": "logging.projects.locations.buckets.views.get", @@ -4924,7 +6553,7 @@ ] }, "patch": { - "description": "Updates a view on a log bucket. This method replaces the following fields in the existing view with values from the new view: filter. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", + "description": "Updates a view on a log bucket. 
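
The links methods above manage a bucket's BigQuery linked dataset; note that create also returns an Operation, and that link_id allows only alphanumerics and underscores. A sketch reusing svc from the first example (bucket path, link ID, and description are hypothetical):

    // imports: "context", logging "google.golang.org/api/logging/v2"
    func createBucketLink(ctx context.Context, svc *logging.Service) (*logging.Operation, error) {
        link := &logging.Link{Description: "BigQuery linked dataset for my-bucket"}
        return svc.Projects.Locations.Buckets.Links.
            Create("projects/my-project/locations/global/buckets/my-bucket", link).
            LinkId("my_link"). // alphanumerics and underscores only
            Context(ctx).Do()
    }
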
This method replaces the value of the filter field from the existing view with the corresponding value from the new view. If an UNAVAILABLE error is returned, this indicates that system is not in a state where it can update the view. If this occurs, please try again in a few minutes.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", "httpMethod": "PATCH", "id": "logging.projects.locations.buckets.views.patch", @@ -4958,6 +6587,57 @@ "https://www.googleapis.com/auth/logging.admin" ] } + }, + "resources": { + "logs": { + "methods": { + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}/logs", + "httpMethod": "GET", + "id": "logging.projects.locations.buckets.views.logs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "resourceNames": { + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", + "location": "query", + "repeated": true, + "type": "string" + } + }, + "path": "v2/{+parent}/logs", + "response": { + "$ref": "ListLogsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + } } } } @@ -5022,7 +6702,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "logging.projects.locations.operations.list", @@ -5066,6 +6746,152 @@ ] } } + }, + "recentQueries": { + "methods": { + "list": { + "description": "Lists the RecentQueries that were created by the user making the request.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recentQueries", + "httpMethod": "GET", + "id": "logging.projects.locations.recentQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example:projects/my-project/locations/us-central1Note: The location portion of the resource must be specified, but supplying the character - in place of LOCATION_ID will return all recent queries.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/recentQueries", + "response": { + "$ref": "ListRecentQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, + "savedQueries": { + "methods": { + "create": { + "description": "Creates a new SavedQuery for the user making the request.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries", + "httpMethod": "POST", + "id": "logging.projects.locations.savedQueries.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource in which to create the saved query: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/global\" \"organizations/123456789/locations/us-central1\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "savedQueryId": { + "description": "Optional. 
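
recentQueries.list, defined above, accepts the - wildcard in place of LOCATION_ID to cover every location. A paging sketch reusing svc from the first example (the project ID is hypothetical):

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func listRecentQueries(ctx context.Context, svc *logging.Service) error {
        return svc.Projects.Locations.RecentQueries.
            List("projects/my-project/locations/-"). // "-" spans all locations
            PageSize(100).
            Pages(ctx, func(resp *logging.ListRecentQueriesResponse) error {
                for _, q := range resp.RecentQueries {
                    fmt.Println(q.Name)
                }
                return nil
            })
    }
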
The ID to use for the saved query, which will become the final component of the saved query's resource name.If the saved_query_id is not provided, the system will generate an alphanumeric ID.The saved_query_id is limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, periods.First character has to be alphanumeric.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+parent}/savedQueries", + "request": { + "$ref": "SavedQuery" + }, + "response": { + "$ref": "SavedQuery" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "delete": { + "description": "Deletes an existing SavedQuery that was created by the user making the request.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries/{savedQueriesId}", + "httpMethod": "DELETE", + "id": "logging.projects.locations.savedQueries.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the saved query to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For example: \"projects/my-project/locations/global/savedQueries/my-saved-query\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/savedQueries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "list": { + "description": "Lists the SavedQueries that were created by the user making the request.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/savedQueries", + "httpMethod": "GET", + "id": "logging.projects.locations.savedQueries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request.Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The resource to which the listed queries belong. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/us-central1\" Note: The locations portion of the resource must be specified. 
To get a list of all saved queries, a wildcard character - can be used for LOCATION_ID, for example: \"projects/my-project/locations/-\" ", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/savedQueries", + "response": { + "$ref": "ListSavedQueriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -5118,14 +6944,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^projects/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -5305,7 +7131,7 @@ "sinks": { "methods": { "create": { - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "description": "Creates a sink that exports specified log entries to a destination. The export begins upon ingress, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "flatPath": "v2/projects/{projectsId}/sinks", "httpMethod": "POST", "id": "logging.projects.sinks.create", @@ -5313,6 +7139,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. 
If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -5321,7 +7152,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) used by the sinks with the same parent. For more information, see writer_identity in LogSink.", "location": "query", "type": "boolean" } @@ -5432,7 +7263,7 @@ ] }, "patch": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", "httpMethod": "PATCH", "id": "logging.projects.sinks.patch", @@ -5440,6 +7271,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -5448,7 +7284,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -5472,7 +7308,7 @@ ] }, "update": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}", "httpMethod": "PUT", "id": "logging.projects.sinks.update", @@ -5480,6 +7316,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -5488,7 +7329,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -5518,7 +7359,7 @@ "sinks": { "methods": { "create": { - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", + "description": "Creates a sink that exports specified log entries to a destination. The export begins upon ingress, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", "flatPath": "v2/{v2Id}/{v2Id1}/sinks", "httpMethod": "POST", "id": "logging.sinks.create", @@ -5526,6 +7367,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -5534,7 +7380,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Cloud Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) used by the sinks with the same parent. For more information, see writer_identity in LogSink.", "location": "query", "type": "boolean" } @@ -5645,7 +7491,7 @@ ] }, "update": { - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "description": "Updates a sink. This method replaces the values of the destination and filter fields of the existing sink with the corresponding values from the new sink.The updated sink might also have a new writer_identity; see the unique_writer_identity field.", "flatPath": "v2/{v2Id}/{v2Id1}/sinks/{sinksId}", "httpMethod": "PUT", "id": "logging.sinks.update", @@ -5653,6 +7499,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -5661,7 +7512,7 @@ "type": "string" }, "uniqueWriterIdentity": { - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a unique service account. It is an error if the old value is true and the new value is set to false or defaulted to false.", + "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field: If the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity. If the old value is false and the new value is true, then writer_identity is changed to a service agent (https://cloud.google.com/iam/docs/service-account-types#service-agents) owned by Cloud Logging. 
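
Per the rewritten uniqueWriterIdentity text above, flipping it from false to true on update moves writer_identity to a Cloud Logging service agent, while true back to false is an error. An update sketch restricted to the filter field, reusing svc from the first example; the sink path and filter are hypothetical, and the UpdateMask setter is assumed from the method's updateMask query parameter (not shown in this excerpt):

    // imports: "context", "fmt", logging "google.golang.org/api/logging/v2"
    func updateSinkFilter(ctx context.Context, svc *logging.Service) error {
        sink := &logging.LogSink{Filter: `severity>=ERROR`} // hypothetical
        updated, err := svc.Sinks.
            Update("projects/my-project/sinks/my-sink", sink).
            UpdateMask("filter").       // replace only the filter field
            UniqueWriterIdentity(true). // false -> true switches to a service agent
            Context(ctx).Do()
        if err != nil {
            return err
        }
        fmt.Println("writer identity now:", updated.WriterIdentity)
        return nil
    }
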
It is an error if the old value is true and the new value is set to false or defaulted to false.", "location": "query", "type": "boolean" }, @@ -5689,7 +7540,7 @@ "v2": { "methods": { "getCmekSettings": { - "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the Logging CMEK settings for the given resource.Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/{v2Id}/{v2Id1}/cmekSettings", "httpMethod": "GET", "id": "logging.getCmekSettings", @@ -5698,7 +7549,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve CMEK settings. \"projects/[PROJECT_ID]/cmekSettings\" \"organizations/[ORGANIZATION_ID]/cmekSettings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings\" \"folders/[FOLDER_ID]/cmekSettings\" For example:\"organizations/12345/cmekSettings\"Note: CMEK for the Log Router can be configured for Google Cloud projects, folders, organizations, and billing accounts. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -5717,7 +7568,7 @@ ] }, "getSettings": { - "description": "Gets the Log Router settings for the given resource.Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Gets the settings for the given resource.Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.See View default resource settings for Logging (https://cloud.google.com/logging/docs/default-settings#view-org-settings) for more information.", "flatPath": "v2/{v2Id}/{v2Id1}/settings", "httpMethod": "GET", "id": "logging.getSettings", @@ -5726,7 +7577,7 @@ ], "parameters": { "name": { - "description": "Required. The resource for which to retrieve settings. 
\"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can be get for Google Cloud projects, folders, organizations and billing accounts. Currently it can only be configured for organizations. Once configured for an organization, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource for which to retrieve settings. \"projects/[PROJECT_ID]/settings\" \"organizations/[ORGANIZATION_ID]/settings\" \"billingAccounts/[BILLING_ACCOUNT_ID]/settings\" \"folders/[FOLDER_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings can be retrieved for Google Cloud projects, folders, organizations, and billing accounts.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -5745,7 +7596,7 @@ ] }, "updateCmekSettings": { - "description": "Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Updates the Log Router CMEK settings for the given resource.Note: CMEK for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateCmekSettings fails when any of the following are true: The value of kms_key_name is invalid. The associated service account doesn't have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key. Access to the key is disabled.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "flatPath": "v2/{v2Id}/{v2Id1}/cmekSettings", "httpMethod": "PATCH", "id": "logging.updateCmekSettings", @@ -5780,7 +7631,7 @@ ] }, "updateSettings": { - "description": "Updates the Log Router settings for the given resource.Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.UpdateSettings will fail if 1) kms_key_name is invalid, or 2) the associated service account does not have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or 3) access to the key is disabled. 4) location_id is not supported by Logging. 5) location_id violate OrgPolicy.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Updates the settings for the given resource. This method applies to all feature configurations for organization and folders.UpdateSettings fails when any of the following are true: The value of storage_location either isn't supported by Logging or violates the location OrgPolicy. The default_sink_config field is set, but it has an unspecified filter write mode. The value of kms_key_name is invalid. 
The associated service account doesn't have the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key. Access to the key is disabled.See Configure default settings for organizations and folders (https://cloud.google.com/logging/docs/default-settings) for more information.", "flatPath": "v2/{v2Id}/{v2Id1}/settings", "httpMethod": "PATCH", "id": "logging.updateSettings", @@ -5789,7 +7640,7 @@ ], "parameters": { "name": { - "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"Note: Settings for the Log Router can currently only be configured for Google Cloud organizations. Once configured, it applies to all projects and folders in the Google Cloud organization.", + "description": "Required. The resource name for the settings to update. \"organizations/[ORGANIZATION_ID]/settings\" For example:\"organizations/12345/settings\"", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -5817,9 +7668,21 @@ } } }, - "revision": "20220211", + "revision": "20240207", "rootUrl": "https://logging.googleapis.com/", "schemas": { + "BigQueryDataset": { + "description": "Describes a BigQuery dataset that was created by a link.", + "id": "BigQueryDataset", + "properties": { + "datasetId": { + "description": "Output only. The full resource name of the BigQuery dataset. The DATASET_ID will match the ID of the link, so the link must match the naming restrictions of BigQuery datasets (alphanumeric characters and underscores only).The dataset will have a resource path of \"bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID\"", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "BigQueryOptions": { "description": "Options that change functionality of a sink exporting data to BigQuery.", "id": "BigQueryOptions", @@ -5829,13 +7692,63 @@ "type": "boolean" }, "usesTimestampColumnPartitioning": { - "description": "Output only. True if new timestamp column based partitioning is in use, false if legacy ingestion-time partitioning is in use.All new sinks will have this field set true and will use timestamp column based partitioning. If use_partitioned_tables is false, this value has no meaning and will be false. Legacy sinks using partitioned tables will have this field set to false.", + "description": "Output only. True if new timestamp column based partitioning is in use, false if legacy ingress-time partitioning is in use.All new sinks will have this field set true and will use timestamp column based partitioning. If use_partitioned_tables is false, this value has no meaning and will be false. Legacy sinks using partitioned tables will have this field set to false.", "readOnly": true, "type": "boolean" } }, "type": "object" }, + "BucketMetadata": { + "description": "Metadata for LongRunningUpdateBucket Operations.", + "id": "BucketMetadata", + "properties": { + "createBucketRequest": { + "$ref": "CreateBucketRequest", + "description": "LongRunningCreateBucket RPC request." + }, + "endTime": { + "description": "The end time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "The create time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "Output only. 
State of an operation.", + "enum": [ + "OPERATION_STATE_UNSPECIFIED", + "OPERATION_STATE_SCHEDULED", + "OPERATION_STATE_WAITING_FOR_PERMISSIONS", + "OPERATION_STATE_RUNNING", + "OPERATION_STATE_SUCCEEDED", + "OPERATION_STATE_FAILED", + "OPERATION_STATE_CANCELLED", + "OPERATION_STATE_PENDING" + ], + "enumDescriptions": [ + "Should not be used.", + "The operation is scheduled.", + "Waiting for necessary permissions.", + "The operation is running.", + "The operation was completed successfully.", + "The operation failed.", + "The operation was cancelled by the user.", + "The operation is waiting for quota." + ], + "readOnly": true, + "type": "string" + }, + "updateBucketRequest": { + "$ref": "UpdateBucketRequest", + "description": "LongRunningUpdateBucket RPC request." + } + }, + "type": "object" + }, "BucketOptions": { "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", "id": "BucketOptions", @@ -5866,7 +7779,12 @@ "id": "CmekSettings", "properties": { "kmsKeyName": { - "description": "The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name or disabled by setting the key name to an empty string. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Optional. 
The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the needed cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name or disabled by setting the key name to an empty string. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "type": "string" + }, + "kmsKeyVersionName": { + "description": "Output only. The CryptoKeyVersion resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1\"This is a read-only field used to convey the specific configured CryptoKeyVersion of kms_key that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version.If this field is populated, the kms_key is tied to a specific CryptoKeyVersion.", + "readOnly": true, "type": "string" }, "name": { @@ -5890,6 +7808,10 @@ "description": "Identifies whether the user has requested cancellation of the operation.", "type": "boolean" }, + "destination": { + "description": "Destination to which to copy log entries.For example, a Cloud Storage bucket:\"storage.googleapis.com/my-cloud-storage-bucket\"", + "type": "string" + }, "endTime": { "description": "The end time of an operation.", "format": "google-datetime", @@ -5904,13 +7826,17 @@ "$ref": "CopyLogEntriesRequest", "description": "CopyLogEntries RPC request." }, + "source": { + "description": "Source from which to copy log entries.For example, a log bucket:\"projects/my-project/locations/global/buckets/my-source-bucket\"", + "type": "string" + }, "startTime": { "description": "The create time of an operation.", "format": "google-datetime", "type": "string" }, "state": { - "description": "State of an operation.", + "description": "Output only. State of an operation.", "enum": [ "OPERATION_STATE_UNSPECIFIED", "OPERATION_STATE_SCHEDULED", @@ -5918,7 +7844,8 @@ "OPERATION_STATE_RUNNING", "OPERATION_STATE_SUCCEEDED", "OPERATION_STATE_FAILED", - "OPERATION_STATE_CANCELLED" + "OPERATION_STATE_CANCELLED", + "OPERATION_STATE_PENDING" ], "enumDescriptions": [ "Should not be used.", @@ -5927,8 +7854,14 @@ "The operation is running.", "The operation was completed successfully.", "The operation failed.", - "The operation was cancelled by the user." + "The operation was cancelled by the user.", + "The operation is waiting for quota." 
], + "readOnly": true, + "type": "string" + }, + "verb": { + "description": "Name of the verb executed by the operation.For example,\"copy\"", "type": "string" }, "writerIdentity": { @@ -5957,20 +7890,101 @@ }, "type": "object" }, - "CopyLogEntriesResponse": { - "description": "Response type for CopyLogEntries long running operations.", - "id": "CopyLogEntriesResponse", + "CopyLogEntriesResponse": { + "description": "Response type for CopyLogEntries long running operations.", + "id": "CopyLogEntriesResponse", + "properties": { + "logEntriesCopiedCount": { + "description": "Number of log entries copied.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "CreateBucketRequest": { + "description": "The parameters to CreateBucket.", + "id": "CreateBucketRequest", + "properties": { + "bucket": { + "$ref": "LogBucket", + "description": "Required. The new bucket. The region specified in the new bucket must be compliant with any Location Restriction Org Policy. The name field in the bucket is ignored." + }, + "bucketId": { + "description": "Required. A client-assigned identifier such as \"my-bucket\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.", + "type": "string" + }, + "parent": { + "description": "Required. The resource in which to create the log bucket: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global\"", + "type": "string" + } + }, + "type": "object" + }, + "CreateLinkRequest": { + "description": "The parameters to CreateLink.", + "id": "CreateLinkRequest", + "properties": { + "link": { + "$ref": "Link", + "description": "Required. The new link." + }, + "linkId": { + "description": "Required. The ID to use for the link. The link_id can have up to 100 characters. A valid link_id must only have alphanumeric characters and underscores within it.", + "type": "string" + }, + "parent": { + "description": "Required. The full resource name of the bucket to create a link for. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" ", + "type": "string" + } + }, + "type": "object" + }, + "DefaultSinkConfig": { + "description": "Describes the custom _Default sink configuration that is used to override the built-in _Default sink configuration in newly created resource containers, such as projects or folders.", + "id": "DefaultSinkConfig", + "properties": { + "exclusions": { + "description": "Optional. Specifies the set of exclusions to be added to the _Default sink in newly created resource containers.", + "items": { + "$ref": "LogExclusion" + }, + "type": "array" + }, + "filter": { + "description": "Optional. An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-queries). The only exported log entries are those that are in the resource owning the sink and that match the filter.For example:logName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERRORTo match all logs, don't add exclusions and use the following line as the value of filter:logName:*Cannot be empty or unset when the value of mode is OVERWRITE.", + "type": "string" + }, + "mode": { + "description": "Required. 
Determines the behavior to apply to the built-in _Default sink inclusion filter.Exclusions are always appended, as built-in _Default sinks have no exclusions.", + "enum": [ + "FILTER_WRITE_MODE_UNSPECIFIED", + "APPEND", + "OVERWRITE" + ], + "enumDescriptions": [ + "The filter's write mode is unspecified. This mode must not be used.", + "The contents of filter will be appended to the built-in _Default sink filter. Using the append mode with an empty filter will keep the sink inclusion filter unchanged.", + "The contents of filter will overwrite the built-in _Default sink filter." + ], + "type": "string" + } + }, + "type": "object" + }, + "DeleteLinkRequest": { + "description": "The parameters to DeleteLink.", + "id": "DeleteLinkRequest", "properties": { - "logEntriesCopiedCount": { - "description": "Number of log entries copied.", - "format": "int64", + "name": { + "description": "Required. The full resource name of the link to delete. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" ", "type": "string" } }, "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", "id": "Empty", "properties": {}, "type": "object" @@ -5991,7 +8005,7 @@ "type": "object" }, "Exponential": { - "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. 
Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i).Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "id": "Exponential", "properties": { "growthFactor": { @@ -6043,7 +8057,7 @@ "type": "string" }, "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", + "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (https://datatracker.ietf.org/doc/html/rfc2616#section-14.36).", "type": "string" }, "remoteIp": { @@ -6084,6 +8098,37 @@ }, "type": "object" }, + "IndexConfig": { + "description": "Configuration for an indexed field.", + "id": "IndexConfig", + "properties": { + "createTime": { + "description": "Output only. The timestamp when the index was last modified.This is used to return the timestamp, and will be ignored if supplied during update.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "fieldPath": { + "description": "Required. The LogEntry field path to index.Note that some paths are automatically indexed, and other paths are not eligible for indexing. See indexing documentation( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details.For example: jsonPayload.request.status", + "type": "string" + }, + "type": { + "description": "Required. The type of data in this index.", + "enum": [ + "INDEX_TYPE_UNSPECIFIED", + "INDEX_TYPE_STRING", + "INDEX_TYPE_INTEGER" + ], + "enumDescriptions": [ + "The index's type is unspecified.", + "The index is a string-type index.", + "The index is an integer-type index." + ], + "type": "string" + } + }, + "type": "object" + }, "LabelDescriptor": { "description": "A description of a label.", "id": "LabelDescriptor", @@ -6114,7 +8159,7 @@ "type": "object" }, "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i).Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", "id": "Linear", "properties": { "numFiniteBuckets": { @@ -6135,6 +8180,103 @@ }, "type": "object" }, + "Link": { + "description": "Describes a link connected to an analytics enabled bucket.", + "id": "Link", + "properties": { + "bigqueryDataset": { + "$ref": "BigQueryDataset", + "description": "Optional. The information of a BigQuery Dataset. When a link is created, a BigQuery dataset is created along with it, in the same project as the LogBucket it's linked to. This dataset will also have BigQuery Views corresponding to the LogViews in the bucket." + }, + "createTime": { + "description": "Output only. The creation timestamp of the link.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. 
Describes this link.The maximum length of the description is 8000 characters.", + "type": "string" + }, + "lifecycleState": { + "description": "Output only. The resource lifecycle state.", + "enum": [ + "LIFECYCLE_STATE_UNSPECIFIED", + "ACTIVE", + "DELETE_REQUESTED", + "UPDATING", + "CREATING", + "FAILED" + ], + "enumDescriptions": [ + "Unspecified state. This is only used/useful for distinguishing unset values.", + "The normal and active state.", + "The resource has been marked for deletion by the user. For some resources (e.g. buckets), this can be reversed by an un-delete operation.", + "The resource has been marked for an update by the user. It will remain in this state until the update is complete.", + "The resource has been marked for creation by the user. It will remain in this state until the creation is complete.", + "The resource is in an INTERNAL error state." + ], + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The resource name of the link. The name can have up to 100 characters. A valid link id (at the end of the link name) must only have alphanumeric characters and underscores within it. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links/[LINK_ID]\" For example:`projects/my-project/locations/global/buckets/my-bucket/links/my_link", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "LinkMetadata": { + "description": "Metadata for long running Link operations.", + "id": "LinkMetadata", + "properties": { + "createLinkRequest": { + "$ref": "CreateLinkRequest", + "description": "CreateLink RPC request." + }, + "deleteLinkRequest": { + "$ref": "DeleteLinkRequest", + "description": "DeleteLink RPC request." + }, + "endTime": { + "description": "The end time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "The start time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "Output only. State of an operation.", + "enum": [ + "OPERATION_STATE_UNSPECIFIED", + "OPERATION_STATE_SCHEDULED", + "OPERATION_STATE_WAITING_FOR_PERMISSIONS", + "OPERATION_STATE_RUNNING", + "OPERATION_STATE_SUCCEEDED", + "OPERATION_STATE_FAILED", + "OPERATION_STATE_CANCELLED", + "OPERATION_STATE_PENDING" + ], + "enumDescriptions": [ + "Should not be used.", + "The operation is scheduled.", + "Waiting for necessary permissions.", + "The operation is running.", + "The operation was completed successfully.", + "The operation failed.", + "The operation was cancelled by the user.", + "The operation is waiting for quota." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "ListBucketsResponse": { "description": "The response from ListBuckets.", "id": "ListBucketsResponse", @@ -6171,6 +8313,24 @@ }, "type": "object" }, + "ListLinksResponse": { + "description": "The response from ListLinks.", + "id": "ListLinksResponse", + "properties": { + "links": { + "description": "A list of links.", + "items": { + "$ref": "Link" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If there might be more results than those appearing in this response, then nextPageToken is included. 
To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" + } + }, + "type": "object" + }, "ListLocationsResponse": { "description": "The response message for Locations.ListLocations.", "id": "ListLocationsResponse", @@ -6194,7 +8354,7 @@ "id": "ListLogEntriesRequest", "properties": { "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Queries (https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "description": "Optional. A filter that chooses which log entries to return. For more information, see Logging query language (https://cloud.google.com/logging/docs/view/logging-query-language).Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of a filter is 20,000 characters.", "type": "string" }, "orderBy": { @@ -6202,7 +8362,7 @@ "type": "string" }, "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Default is 50. If the value is negative or exceeds 1000, the request is rejected. The presence of next_page_token in the response indicates that more results might be available.", + "description": "Optional. The maximum number of results to return from this request. Default is 50. If the value is negative, the request is rejected.The presence of next_page_token in the response indicates that more results might be available.", "format": "int32", "type": "integer" }, @@ -6211,6 +8371,7 @@ "type": "string" }, "projectIds": { + "deprecated": true, "description": "Optional. Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\".", "items": { "type": "string" @@ -6218,7 +8379,7 @@ "type": "array" }, "resourceNames": { - "description": "Required. Names of one or more parent resources from which to retrieve log entries: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]May alternatively be one or more views: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]Projects listed in the project_ids field are added to this list.", + "description": "Required. 
Names of one or more parent resources from which to retrieve log entries: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]May alternatively be one or more views: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]Projects listed in the project_ids field are added to this list. A maximum of 100 resources may be specified in a single request.", "items": { "type": "string" }, @@ -6317,6 +8478,56 @@ }, "type": "object" }, + "ListRecentQueriesResponse": { + "description": "The response from ListRecentQueries.", + "id": "ListRecentQueriesResponse", + "properties": { + "nextPageToken": { + "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" + }, + "recentQueries": { + "description": "A list of recent queries.", + "items": { + "$ref": "RecentQuery" + }, + "type": "array" + }, + "unreachable": { + "description": "The unreachable resources. Each resource can be either 1) a saved query if a specific query is unreachable or 2) a location if a specific location is unreachable. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/recentQueries/[QUERY_ID]\" \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example:\"projects/my-project/locations/global/recentQueries/12345678\" \"projects/my-project/locations/global\"If there are unreachable resources, the response will first return pages that contain recent queries, and then return pages that contain the unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListSavedQueriesResponse": { + "description": "The response from ListSavedQueries.", + "id": "ListSavedQueriesResponse", + "properties": { + "nextPageToken": { + "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", + "type": "string" + }, + "savedQueries": { + "description": "A list of saved queries.", + "items": { + "$ref": "SavedQuery" + }, + "type": "array" + }, + "unreachable": { + "description": "The unreachable resources. It can be either 1) a saved query if a specific query is unreachable or 2) a location if a specific location is unreachable. 
\"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" \"projects/[PROJECT_ID]/locations/[LOCATION_ID]\" For example: \"projects/my-project/locations/global/savedQueries/12345678\" \"projects/my-project/locations/global\" If there are unreachable resources, the response will first return pages that contain saved queries, and then return pages that contain the unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "ListSinksResponse": { "description": "Result returned from ListSinks.", "id": "ListSinksResponse", @@ -6354,7 +8565,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -6387,13 +8598,28 @@ }, "type": "object" }, + "LocationMetadata": { + "description": "Cloud Logging specific location metadata.", + "id": "LocationMetadata", + "properties": { + "logAnalyticsEnabled": { + "description": "Indicates whether or not Log Analytics features are supported in the given location.", + "type": "boolean" + } + }, + "type": "object" + }, "LogBucket": { "description": "Describes a repository in which log entries are stored.", "id": "LogBucket", "properties": { + "analyticsEnabled": { + "description": "Optional. Whether log analytics is enabled for this bucket.Once enabled, log analytics features cannot be disabled.", + "type": "boolean" + }, "cmekSettings": { "$ref": "CmekSettings", - "description": "The CMEK settings of the log bucket. If present, new log entries written to this log bucket are encrypted using the CMEK key provided in this configuration. If a log bucket has CMEK settings, the CMEK settings cannot be disabled later by updating the log bucket. Changing the KMS key is allowed." + "description": "Optional. The CMEK settings of the log bucket. If present, new log entries written to this log bucket are encrypted using the CMEK key provided in this configuration. If a log bucket has CMEK settings, the CMEK settings cannot be disabled later by updating the log bucket. Changing the KMS key is allowed." }, "createTime": { "description": "Output only. The creation timestamp of the bucket. This is not set for any of the default buckets.", @@ -6402,26 +8628,39 @@ "type": "string" }, "description": { - "description": "Describes this bucket.", + "description": "Optional. Describes this bucket.", "type": "string" }, + "indexConfigs": { + "description": "Optional. A list of indexed fields and related configuration data.", + "items": { + "$ref": "IndexConfig" + }, + "type": "array" + }, "lifecycleState": { "description": "Output only. The bucket lifecycle state.", "enum": [ "LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", - "DELETE_REQUESTED" + "DELETE_REQUESTED", + "UPDATING", + "CREATING", + "FAILED" ], "enumDescriptions": [ "Unspecified state. This is only used/useful for distinguishing unset values.", "The normal and active state.", - "The resource has been marked for deletion by the user. For some resources (e.g. buckets), this can be reversed by an un-delete operation." + "The resource has been marked for deletion by the user. For some resources (e.g. buckets), this can be reversed by an un-delete operation.", + "The resource has been marked for an update by the user. It will remain in this state until the update is complete.", + "The resource has been marked for creation by the user. 
It will remain in this state until the creation is complete.", + "The resource is in an INTERNAL error state." ], "readOnly": true, "type": "string" }, "locked": { - "description": "Whether the bucket is locked.The retention period on a locked bucket cannot be changed. Locked buckets may only be deleted if they are empty.", + "description": "Optional. Whether the bucket is locked.The retention period on a locked bucket cannot be changed. Locked buckets may only be deleted if they are empty.", "type": "boolean" }, "name": { @@ -6430,14 +8669,14 @@ "type": "string" }, "restrictedFields": { - "description": "Log entry field paths that are denied access in this bucket.The following fields and their children are eligible: textPayload, jsonPayload, protoPayload, httpRequest, labels, sourceLocation.Restricting a repeated field will restrict all values. Adding a parent will block all child fields. (e.g. foo.bar will block foo.bar.baz)", + "description": "Optional. Log entry field paths that are denied access in this bucket.The following fields and their children are eligible: textPayload, jsonPayload, protoPayload, httpRequest, labels, sourceLocation.Restricting a repeated field will restrict all values. Adding a parent will block all child fields. (e.g. foo.bar will block foo.bar.baz)", "items": { "type": "string" }, "type": "array" }, "retentionDays": { - "description": "Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.", + "description": "Optional. Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.", "format": "int32", "type": "integer" }, @@ -6454,6 +8693,14 @@ "description": "An individual entry in a log.", "id": "LogEntry", "properties": { + "errorGroups": { + "description": "Output only. The Error Reporting (https://cloud.google.com/error-reporting) error groups associated with this LogEntry. Error Reporting sets the values for this field during error group creation.For more information, see View error details( https://cloud.google.com/error-reporting/docs/viewing-errors#view_error_details)This field isn't available during log routing (https://cloud.google.com/logging/docs/routing/overview)", + "items": { + "$ref": "LogErrorGroup" + }, + "readOnly": true, + "type": "array" + }, "httpRequest": { "$ref": "HttpRequest", "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." @@ -6478,11 +8725,12 @@ "type": "object" }, "logName": { - "description": "Required. The resource name of the log to which this log entry belongs: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" A project number may be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. 
Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\".[LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual, but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", + "description": "Required. The resource name of the log to which this log entry belongs: \"projects/[PROJECT_ID]/logs/[LOG_ID]\" \"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\" \"folders/[FOLDER_ID]/logs/[LOG_ID]\" A project number may be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\".[LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is processed as usual, but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", "type": "string" }, "metadata": { "$ref": "MonitoredResourceMetadata", + "deprecated": true, "description": "Output only. Deprecated. This field is not used by Logging. Any value written to it is cleared.", "readOnly": true }, @@ -6539,7 +8787,7 @@ "description": "Optional. Source code location information associated with the log entry, if any." }, "spanId": { - "description": "Optional. The span ID within the trace associated with the log entry.For Trace spans, this is the same format that the Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as 000000000000004a.", + "description": "Optional. The ID of the Cloud Trace (https://cloud.google.com/trace) span associated with the current operation in which the log is being written. For example, if a span has the REST resource name of \"projects/some-project/traces/some-trace/spans/some-span-id\", then the span_id field is \"some-span-id\".A Span (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. 
It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076", "type": "string" }, "split": { @@ -6551,16 +8799,16 @@ "type": "string" }, "timestamp": { - "description": "Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Logging assigns it the current time. Timestamps have nanosecond accuracy, but trailing zeros in the fractional seconds might be omitted when the timestamp is displayed.Incoming log entries must have timestamps that don't exceed the logs retention period (https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, and that don't exceed 24 hours in the future. Log entries outside those time boundaries aren't ingested by Logging.", + "description": "Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Logging assigns it the current time. Timestamps have nanosecond accuracy, but trailing zeros in the fractional seconds might be omitted when the timestamp is displayed.Incoming log entries must have timestamps that don't exceed the logs retention period (https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, and that don't exceed 24 hours in the future. Log entries outside those time boundaries are rejected by Logging.", "format": "google-datetime", "type": "string" }, "trace": { - "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", + "description": "Optional. The REST resource name of the trace being written to Cloud Trace (https://cloud.google.com/trace) in association with this log entry. For example, if your trace data is stored in the Cloud project \"my-trace-project\" and if the service that is creating the log entry receives a trace header that includes the trace ID \"12345\", then the service should use \"projects/my-trace-project/traces/12345\".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace.", "type": "string" }, "traceSampled": { - "description": "Optional. The sampling decision of the trace associated with the log entry.True means that the trace resource name in the trace field was sampled for storage in a trace backend. False means that the trace was not sampled for storage when this log entry was written, or the sampling decision was unknown at the time. A non-sampled trace value is still useful as a request correlation identifier. The default is False.", + "description": "Optional. The sampling decision of the span associated with the log entry at the time the log entry was created. This field corresponds to the sampled flag in the W3C trace-context specification (https://www.w3.org/TR/trace-context/#sampled-flag). A non-sampled trace value is still useful as a request correlation identifier. 
The default is False.", "type": "boolean" } }, @@ -6609,6 +8857,17 @@ }, "type": "object" }, + "LogErrorGroup": { + "description": "Contains metadata that associates the LogEntry to Error Reporting error groups.", + "id": "LogErrorGroup", + "properties": { + "id": { + "description": "The id is a unique identifier for a particular error group; it is the last part of the error group resource name: /project/[PROJECT_ID]/errors/[ERROR_GROUP_ID]. Example: COShysOX0r_51QE. The id is derived from key parts of the error-log content and is treated as Service Data. For information about how Service Data is handled, see Google Cloud Privacy Notice (https://cloud.google.com/terms/cloud-privacy-notice).", + "type": "string" + } + }, + "type": "object" + }, "LogExclusion": { "description": "Specifies a set of log entries that are filtered out by a sink. If your Google Cloud resource receives a large volume of log entries, you can use exclusions to reduce your chargeable logs. Note that exclusions on organization-level and folder-level sinks don't apply to child resources. Note also that you cannot modify the _Required sink or exclude logs from it.", "id": "LogExclusion", @@ -6632,7 +8891,8 @@ "type": "string" }, "name": { - "description": "Required. A client-assigned identifier, such as \"load-balancer-exclusion\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.", + "description": "Output only. A client-assigned identifier, such as \"load-balancer-exclusion\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.", + "readOnly": true, "type": "string" }, "updateTime": { @@ -6694,6 +8954,10 @@ "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.Logs-based metrics can also be used to extract values from logs and create a distribution of the values. The distribution records the statistics of the extracted values along with an optional histogram of the values as specified by the bucket options.", "id": "LogMetric", "properties": { + "bucketName": { + "description": "Optional. The resource name of the Log Bucket that owns the Log Metric. Only Log Buckets in projects are supported. The bucket has to be in the same project as the metric.For example:projects/my-project/locations/global/buckets/my-bucketIf empty, then the Log Metric is considered a non-Bucket Log Metric.", + "type": "string" + }, "bucketOptions": { "$ref": "BucketOptions", "description": "Optional. The bucket_options are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values." @@ -6720,7 +8984,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. A map from a label key string to an extractor expression which is used to extract data from a log entry field and assign as the label value. Each label key specified in the LabelDescriptor must have an associated extractor expression in this map. The syntax of the extractor expression is the same as for the value_extractor field.The extracted value is converted to the type defined in the label descriptor. If the either the extraction or the type conversion fails, the label will have a default value. 
The default value for a string label is an empty string, for an integer label its 0, and for a boolean label its false.Note that there are upper bounds on the maximum number of labels and the number of active time series that are allowed in a project.", + "description": "Optional. A map from a label key string to an extractor expression which is used to extract data from a log entry field and assign as the label value. Each label key specified in the LabelDescriptor must have an associated extractor expression in this map. The syntax of the extractor expression is the same as for the value_extractor field.The extracted value is converted to the type defined in the label descriptor. If either the extraction or the type conversion fails, the label will have a default value. The default value for a string label is an empty string, for an integer label its 0, and for a boolean label its false.Note that there are upper bounds on the maximum number of labels and the number of active time series that are allowed in a project.", "type": "object" }, "metricDescriptor": { @@ -6738,10 +9002,11 @@ "type": "string" }, "valueExtractor": { - "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", + "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The arguments are: field: The name of the log entry field from which the value is to be extracted. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", "type": "string" }, "version": { + "deprecated": true, "description": "Deprecated. The API version that created or updated this metric. 
The v2 format is used by default and cannot be changed.", "enum": [ "V2", @@ -6757,7 +9022,7 @@ "type": "object" }, "LogSink": { - "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, a Pub/Sub topic or a Cloud Logging log bucket. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", + "description": "Describes a sink used to export log entries to one of the following destinations: a Cloud Logging log bucket, a Cloud Storage bucket, a BigQuery dataset, a Pub/Sub topic, a Cloud project.A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", "id": "LogSink", "properties": { "bigqueryOptions": { @@ -6775,7 +9040,7 @@ "type": "string" }, "destination": { - "description": "Required. The export destination: \"storage.googleapis.com/[GCS_BUCKET]\" \"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\" \"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\" The sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", + "description": "Required. The export destination: \"storage.googleapis.com/[GCS_BUCKET]\" \"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\" \"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\" \"logging.googleapis.com/projects/[PROJECT_ID]\" \"logging.googleapis.com/projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" The sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", "type": "string" }, "disabled": { @@ -6798,10 +9063,12 @@ "type": "boolean" }, "name": { - "description": "Required. The client-assigned sink identifier, unique within the project.For example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods. First character has to be alphanumeric.", + "description": "Output only. The client-assigned sink identifier, unique within the project.For example: \"my-syslog-errors-to-pubsub\".Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, periods.First character has to be alphanumeric.", + "readOnly": true, "type": "string" }, "outputVersionFormat": { + "deprecated": true, "description": "Deprecated. This field is unused.", "enum": [ "VERSION_FORMAT_UNSPECIFIED", @@ -6822,7 +9089,7 @@ "type": "string" }, "writerIdentity": { - "description": "Output only. An IAM identity—a service account or group—under which Cloud Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. 
For more information, see Granting Access for a Resource (https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.Sinks that have a destination that is a log bucket in the same project as the sink do not have a writer_identity and no additional permissions are required.", + "description": "Output only. An IAM identity—a service account or group—under which Cloud Logging writes the exported log entries to the sink's destination. This field is either set by specifying custom_writer_identity or set automatically by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting Access for a Resource (https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.Sinks that have a destination that is a log bucket in the same project as the sink cannot have a writer_identity and no additional permissions are required.", "readOnly": true, "type": "string" } @@ -6861,15 +9128,16 @@ "type": "string" }, "description": { - "description": "Describes this view.", + "description": "Optional. Describes this view.", "type": "string" }, "filter": { - "description": "Filter that restricts which log entries in a bucket are visible in this view.Filters are restricted to be a logical AND of ==/!= of any of the following: originating project/folder/organization/billing account. resource type log idFor example:SOURCE(\"projects/myproject\") AND resource.type = \"gce_instance\" AND LOG_ID(\"stdout\")", + "description": "Optional. Filter that restricts which log entries in a bucket are visible in this view.Filters must be logical conjunctions that use the AND operator, and they can use any of the following qualifiers: SOURCE(), which specifies a project, folder, organization, or billing account of origin. resource.type, which specifies the resource type. LOG_ID(), which identifies the log.They can also use the negations of these qualifiers with the NOT operator.For example:SOURCE(\"projects/myproject\") AND resource.type = \"gce_instance\" AND NOT LOG_ID(\"stdout\")", "type": "string" }, "name": { - "description": "The resource name of the view.For example:projects/my-project/locations/global/buckets/my-bucket/views/my-view", + "description": "Output only. The resource name of the view.For example:projects/my-project/locations/global/buckets/my-bucket/views/my-view", + "readOnly": true, "type": "string" }, "updateTime": { @@ -6881,6 +9149,34 @@ }, "type": "object" }, + "LoggingQuery": { + "description": "Describes a Cloud Logging query that can be run in Logs Explorer UI or via the logging API.In addition to the query itself, additional information may be stored to capture the display configuration and other UI state used in association with analysis of query results.", + "id": "LoggingQuery", + "properties": { + "filter": { + "description": "Required. An advanced query using the Logging Query Language (https://cloud.google.com/logging/docs/view/logging-query-language). 
The maximum length of the filter is 20000 characters.", + "type": "string" + }, + "summaryFieldEnd": { + "description": "Characters will be counted from the end of the string.", + "format": "int32", + "type": "integer" + }, + "summaryFieldStart": { + "description": "Characters will be counted from the start of the string.", + "format": "int32", + "type": "integer" + }, + "summaryFields": { + "description": "Optional. The set of summary fields to display for this saved query.", + "items": { + "$ref": "SummaryField" + }, + "type": "array" + } + }, + "type": "object" + }, "MetricDescriptor": { "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", "id": "MetricDescriptor", @@ -6998,6 +9294,7 @@ "type": "string" }, "launchStage": { + "deprecated": true, "description": "Deprecated. Must use the MetricDescriptor.launch_stage instead.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", @@ -7095,7 +9392,7 @@ "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).", + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. For a list of types, see Monitored resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).", "type": "string" } }, @@ -7152,12 +9449,49 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "description": "The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", "type": "object" } }, "type": "object" }, + "OpsAnalyticsQuery": { + "description": "Describes an analytics query that can be run in the Log Analytics page of Google Cloud console.Preview: This is a preview feature and may be subject to change before final release.", + "id": "OpsAnalyticsQuery", + "properties": { + "sqlQueryText": { + "description": "Required. 
A logs analytics SQL query, which generally follows BigQuery format.This is the SQL query that appears in the Log Analytics UI's query editor.", + "type": "string" + } + }, + "type": "object" + }, + "RecentQuery": { + "description": "Describes a recent query executed on the Logs Explorer or Log Analytics page within the last ~ 30 days.", + "id": "RecentQuery", + "properties": { + "lastRunTime": { + "description": "Output only. The timestamp when this query was last run.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "loggingQuery": { + "$ref": "LoggingQuery", + "description": "Logging query that can be executed in Logs Explorer or via Logging API." + }, + "name": { + "description": "Output only. Resource name of the recent query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/recentQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support)The QUERY_ID is a system generated alphanumeric ID.", + "readOnly": true, + "type": "string" + }, + "opsAnalyticsQuery": { + "$ref": "OpsAnalyticsQuery", + "description": "Analytics query that can be executed in Log Analytics." + } + }, + "type": "object" + }, "RequestLog": { "description": "Complete log information about a single HTTP request to an App Engine application.", "id": "RequestLog", @@ -7267,6 +9601,10 @@ }, "type": "array" }, + "spanId": { + "description": "Stackdriver Trace span identifier for this request.", + "type": "string" + }, "startTime": { "description": "Time when the request started.", "format": "google-datetime", @@ -7312,20 +9650,83 @@ }, "type": "object" }, + "SavedQuery": { + "description": "Describes a query that has been saved by a user.", + "id": "SavedQuery", + "properties": { + "createTime": { + "description": "Output only. The timestamp when the saved query was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Optional. A human readable description of the saved query.", + "type": "string" + }, + "displayName": { + "description": "Optional. The user specified title for the SavedQuery.", + "type": "string" + }, + "loggingQuery": { + "$ref": "LoggingQuery", + "description": "Logging query that can be executed in Logs Explorer or via Logging API." + }, + "name": { + "description": "Output only. Resource name of the saved query.In the format: \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID]\" For a list of supported locations, see Supported Regions (https://cloud.google.com/logging/docs/region-support#bucket-regions)After the saved query is created, the location cannot be changed.If the user doesn't provide a QUERY_ID, the system will generate an alphanumeric ID.", + "readOnly": true, + "type": "string" + }, + "opsAnalyticsQuery": { + "$ref": "OpsAnalyticsQuery", + "description": "Analytics query that can be executed in Log Analytics." + }, + "updateTime": { + "description": "Output only. The timestamp when the saved query was last updated.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Settings": { - "description": "Describes the settings associated with a project, folder, organization, billing account, or flexible resource.", + "description": "Describes the settings associated with a project, folder, organization, or billing account.", "id": "Settings", "properties": { + "analyticsMode": { + "description": "Optional. 
The default analytics mode of an org or folder which is inherited by all newly created child project buckets.",
+          "enum": [
+            "ANALYTICS_MODE_UNSPECIFIED",
+            "ANALYTICS_ENABLED",
+            "ANALYTICS_DISABLED"
+          ],
+          "enumDescriptions": [
+            "No default analytics mode defined at this resource level, it will inherit from the closest ancestor which has a defined analytics mode. If there is no specified analytics mode across the resource hierarchy, analytics will be disabled by default.",
+            "By default, analytics will be enabled for all new project-level buckets unless explicitly specified otherwise at bucket creation time.",
+            "By default, analytics will be disabled for new project-level buckets unless explicitly specified otherwise at bucket creation time."
+          ],
+          "type": "string"
+        },
+        "defaultSinkConfig": {
+          "$ref": "DefaultSinkConfig",
+          "description": "Optional. Overrides the built-in configuration for _Default sink."
+        },
         "disableDefaultSink": {
-          "description": "Optional. If set to true, the _Default sink in newly created projects and folders will created in a disabled state. This can be used to automatically disable log ingestion if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed.",
+          "description": "Optional. If set to true, the _Default sink in newly created projects and folders will be created in a disabled state. This can be used to automatically disable log storage if there is already an aggregated sink configured in the hierarchy. The _Default sink can be re-enabled manually if needed.",
           "type": "boolean"
         },
         "kmsKeyName": {
-          "description": "Optional. The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK for the Log Router, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
+          "description": "Optional. The resource name for the configured Cloud KMS key.KMS key name format: \"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]\" For example:\"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key\"To enable CMEK, set this field to a valid kms_key_name for which the associated service account has the required roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key.The Cloud KMS key used by the Log Router can be updated by changing the kms_key_name to a new valid key name.To disable CMEK for the Log Router, set this field to an empty string.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.",
           "type": "string"
         },
         "kmsServiceAccountId": {
-          "description": "Output only. 
The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK for Log Router, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that the Log Router will use to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "description": "Output only. The service account that will be used by the Log Router to access your Cloud KMS key.Before enabling CMEK, you must first assign the role roles/cloudkms.cryptoKeyEncrypterDecrypter to the service account that will be used to access your Cloud KMS key. Use GetSettings to obtain the service account ID.See Enabling CMEK for Log Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", + "readOnly": true, + "type": "string" + }, + "loggingServiceAccountId": { + "description": "Output only. The service account for the given resource container, such as project or folder. Log sinks use this service account as their writer_identity if no custom service account is provided in the request when calling the create sink method.", "readOnly": true, "type": "string" }, @@ -7335,7 +9736,7 @@ "type": "string" }, "storageLocation": { - "description": "Optional. The Cloud region that will be used for _Default and _Required log buckets for newly created projects and folders. For example europe-west1. This setting does not affect the location of custom log buckets.", + "description": "Optional. The storage location for the _Default and _Required log buckets of newly created projects and folders, unless the storage location is explicitly provided.Example value: europe-west1.Note: this setting does not affect the location of resources where a location is explicitly provided when created, such as custom log buckets.", "type": "string" } }, @@ -7403,6 +9804,17 @@ }, "type": "object" }, + "SummaryField": { + "description": "A field from the LogEntry that is added to the summary line (https://cloud.google.com/logging/docs/view/logs-explorer-interface#add-summary-fields) for a query in the Logs Explorer.", + "id": "SummaryField", + "properties": { + "field": { + "description": "Optional. The field from the LogEntry to include in the summary line, for example resource.type or jsonPayload.name.", + "type": "string" + } + }, + "type": "object" + }, "SuppressionInfo": { "description": "Information about entries that were omitted from the session.", "id": "SuppressionInfo", @@ -7439,7 +9851,7 @@ "type": "string" }, "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters (https://cloud.google.com/logging/docs/view/advanced_filters). Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "description": "Optional. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. 
The maximum length of a filter is 20,000 characters.", "type": "string" }, "resourceNames": { @@ -7479,6 +9891,26 @@ "properties": {}, "type": "object" }, + "UpdateBucketRequest": { + "description": "The parameters to UpdateBucket.", + "id": "UpdateBucketRequest", + "properties": { + "bucket": { + "$ref": "LogBucket", + "description": "Required. The updated bucket." + }, + "name": { + "description": "Required. The full resource name of the bucket to update. \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" \"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]\" For example:\"projects/my-project/locations/global/buckets/my-bucket\"", + "type": "string" + }, + "updateMask": { + "description": "Required. Field mask that specifies the fields in bucket that need an update. A bucket field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskFor example: updateMask=retention_days", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, "WriteLogEntriesRequest": { "description": "The parameters to WriteLogEntries.", "id": "WriteLogEntriesRequest", @@ -7506,7 +9938,7 @@ "type": "string" }, "partialSuccess": { - "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any entry is not written, then the response status is the error associated with one of the failed entries and the response includes error details keyed by the entries' zero-based index in the entries.write method.", + "description": "Optional. Whether a batch's valid entries should be written even if some other entry failed due to a permanent error such as INVALID_ARGUMENT or PERMISSION_DENIED. If any entry failed, then the response status is the response status of one of the failed entries. The response will include error details in WriteLogEntriesPartialErrors.log_entry_errors keyed by the entries' zero-based index in the entries. Failed requests for which no entries are written will not include per-entry errors.", "type": "boolean" }, "resource": { diff --git a/event-exporter/vendor/google.golang.org/api/logging/v2/logging-gen.go b/event-exporter/vendor/google.golang.org/api/logging/v2/logging-gen.go index aa8c63f12..ae874279e 100644 --- a/event-exporter/vendor/google.golang.org/api/logging/v2/logging-gen.go +++ b/event-exporter/vendor/google.golang.org/api/logging/v2/logging-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2024 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,37 +10,51 @@ // // For product documentation, see: https://cloud.google.com/logging/docs/ // -// Creating a client +// # Library status // -// Usage example: +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. 
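As a concrete illustration of the partial_success semantics described in the schema above, here is a minimal sketch against this package's generated surface; the project and log names are hypothetical placeholders, and the service is assumed to have been built with NewService as documented below:

	import (
		"context"

		logging "google.golang.org/api/logging/v2"
	)

	// writeWithPartialSuccess writes a batch with PartialSuccess set, so valid
	// entries are persisted even if other entries in the batch are rejected.
	func writeWithPartialSuccess(ctx context.Context, svc *logging.Service) error {
		req := &logging.WriteLogEntriesRequest{
			LogName:  "projects/my-project/logs/my-log", // hypothetical
			Resource: &logging.MonitoredResource{Type: "global"},
			Entries: []*logging.LogEntry{
				{TextPayload: "hello", Severity: "INFO"},
			},
			PartialSuccess: true,
		}
		_, err := svc.Entries.Write(req).Context(ctx).Do()
		return err
	}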
+// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. // -// import "google.golang.org/api/logging/v2" -// ... -// ctx := context.Background() -// loggingService, err := logging.NewService(ctx) +// # Creating a client // -// In this example, Google Application Default Credentials are used for authentication. +// Usage example: // -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// import "google.golang.org/api/logging/v2" +// ... +// ctx := context.Background() +// loggingService, err := logging.NewService(ctx) // -// Other authentication options +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// # Other authentication options // -// loggingService, err := logging.NewService(ctx, option.WithScopes(logging.LoggingWriteScope)) +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// loggingService, err := logging.NewService(ctx, option.WithScopes(logging.LoggingWriteScope)) // -// loggingService, err := logging.NewService(ctx, option.WithAPIKey("AIza...")) +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// loggingService, err := logging.NewService(ctx, option.WithAPIKey("AIza...")) // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// loggingService, err := logging.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// loggingService, err := logging.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See [google.golang.org/api/option.ClientOption] for details on options. package logging // import "google.golang.org/api/logging/v2" import ( @@ -77,12 +91,15 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "logging:v2" const apiName = "logging" const apiVersion = "v2" const basePath = "https://logging.googleapis.com/" +const basePathTemplate = "https://logging.UNIVERSE_DOMAIN/" const mtlsBasePath = "https://logging.mtls.googleapis.com/" +const defaultUniverseDomain = "googleapis.com" // OAuth2 scopes used by this API. const ( @@ -106,7 +123,7 @@ const ( // NewService creates a new Service. 
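To make the scope guidance above concrete, a client restricted to read-only access can be built as follows; this is a sketch that relies only on the LoggingReadScope constant defined in this package:

	import (
		"context"

		logging "google.golang.org/api/logging/v2"
		"google.golang.org/api/option"
	)

	// newReadOnlyClient swaps the full default scope set for logging.read.
	func newReadOnlyClient(ctx context.Context) (*logging.Service, error) {
		return logging.NewService(ctx, option.WithScopes(logging.LoggingReadScope))
	}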
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", "https://www.googleapis.com/auth/logging.admin", @@ -116,7 +133,9 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -192,11 +211,9 @@ func (s *Service) userAgent() string { func NewBillingAccountsService(s *Service) *BillingAccountsService { rs := &BillingAccountsService{s: s} - rs.Buckets = NewBillingAccountsBucketsService(s) rs.Exclusions = NewBillingAccountsExclusionsService(s) rs.Locations = NewBillingAccountsLocationsService(s) rs.Logs = NewBillingAccountsLogsService(s) - rs.Operations = NewBillingAccountsOperationsService(s) rs.Sinks = NewBillingAccountsSinksService(s) return rs } @@ -204,40 +221,15 @@ func NewBillingAccountsService(s *Service) *BillingAccountsService { type BillingAccountsService struct { s *Service - Buckets *BillingAccountsBucketsService - Exclusions *BillingAccountsExclusionsService Locations *BillingAccountsLocationsService Logs *BillingAccountsLogsService - Operations *BillingAccountsOperationsService - Sinks *BillingAccountsSinksService } -func NewBillingAccountsBucketsService(s *Service) *BillingAccountsBucketsService { - rs := &BillingAccountsBucketsService{s: s} - rs.Views = NewBillingAccountsBucketsViewsService(s) - return rs -} - -type BillingAccountsBucketsService struct { - s *Service - - Views *BillingAccountsBucketsViewsService -} - -func NewBillingAccountsBucketsViewsService(s *Service) *BillingAccountsBucketsViewsService { - rs := &BillingAccountsBucketsViewsService{s: s} - return rs -} - -type BillingAccountsBucketsViewsService struct { - s *Service -} - func NewBillingAccountsExclusionsService(s *Service) *BillingAccountsExclusionsService { rs := &BillingAccountsExclusionsService{s: s} return rs @@ -251,6 +243,8 @@ func NewBillingAccountsLocationsService(s *Service) *BillingAccountsLocationsSer rs := &BillingAccountsLocationsService{s: s} rs.Buckets = NewBillingAccountsLocationsBucketsService(s) rs.Operations = NewBillingAccountsLocationsOperationsService(s) + rs.RecentQueries = NewBillingAccountsLocationsRecentQueriesService(s) + rs.SavedQueries = NewBillingAccountsLocationsSavedQueriesService(s) return rs } @@ -260,10 +254,15 @@ type BillingAccountsLocationsService struct { Buckets *BillingAccountsLocationsBucketsService Operations *BillingAccountsLocationsOperationsService + + RecentQueries *BillingAccountsLocationsRecentQueriesService + + SavedQueries *BillingAccountsLocationsSavedQueriesService } func NewBillingAccountsLocationsBucketsService(s *Service) *BillingAccountsLocationsBucketsService { rs := &BillingAccountsLocationsBucketsService{s: s} + rs.Links = NewBillingAccountsLocationsBucketsLinksService(s) rs.Views = NewBillingAccountsLocationsBucketsViewsService(s) return rs } @@ -271,16 +270,39 @@ func 
NewBillingAccountsLocationsBucketsService(s *Service) *BillingAccountsLocat type BillingAccountsLocationsBucketsService struct { s *Service + Links *BillingAccountsLocationsBucketsLinksService + Views *BillingAccountsLocationsBucketsViewsService } +func NewBillingAccountsLocationsBucketsLinksService(s *Service) *BillingAccountsLocationsBucketsLinksService { + rs := &BillingAccountsLocationsBucketsLinksService{s: s} + return rs +} + +type BillingAccountsLocationsBucketsLinksService struct { + s *Service +} + func NewBillingAccountsLocationsBucketsViewsService(s *Service) *BillingAccountsLocationsBucketsViewsService { rs := &BillingAccountsLocationsBucketsViewsService{s: s} + rs.Logs = NewBillingAccountsLocationsBucketsViewsLogsService(s) return rs } type BillingAccountsLocationsBucketsViewsService struct { s *Service + + Logs *BillingAccountsLocationsBucketsViewsLogsService +} + +func NewBillingAccountsLocationsBucketsViewsLogsService(s *Service) *BillingAccountsLocationsBucketsViewsLogsService { + rs := &BillingAccountsLocationsBucketsViewsLogsService{s: s} + return rs +} + +type BillingAccountsLocationsBucketsViewsLogsService struct { + s *Service } func NewBillingAccountsLocationsOperationsService(s *Service) *BillingAccountsLocationsOperationsService { @@ -292,21 +314,30 @@ type BillingAccountsLocationsOperationsService struct { s *Service } -func NewBillingAccountsLogsService(s *Service) *BillingAccountsLogsService { - rs := &BillingAccountsLogsService{s: s} +func NewBillingAccountsLocationsRecentQueriesService(s *Service) *BillingAccountsLocationsRecentQueriesService { + rs := &BillingAccountsLocationsRecentQueriesService{s: s} return rs } -type BillingAccountsLogsService struct { +type BillingAccountsLocationsRecentQueriesService struct { s *Service } -func NewBillingAccountsOperationsService(s *Service) *BillingAccountsOperationsService { - rs := &BillingAccountsOperationsService{s: s} +func NewBillingAccountsLocationsSavedQueriesService(s *Service) *BillingAccountsLocationsSavedQueriesService { + rs := &BillingAccountsLocationsSavedQueriesService{s: s} return rs } -type BillingAccountsOperationsService struct { +type BillingAccountsLocationsSavedQueriesService struct { + s *Service +} + +func NewBillingAccountsLogsService(s *Service) *BillingAccountsLogsService { + rs := &BillingAccountsLogsService{s: s} + return rs +} + +type BillingAccountsLogsService struct { s *Service } @@ -371,6 +402,8 @@ func NewFoldersLocationsService(s *Service) *FoldersLocationsService { rs := &FoldersLocationsService{s: s} rs.Buckets = NewFoldersLocationsBucketsService(s) rs.Operations = NewFoldersLocationsOperationsService(s) + rs.RecentQueries = NewFoldersLocationsRecentQueriesService(s) + rs.SavedQueries = NewFoldersLocationsSavedQueriesService(s) return rs } @@ -380,10 +413,15 @@ type FoldersLocationsService struct { Buckets *FoldersLocationsBucketsService Operations *FoldersLocationsOperationsService + + RecentQueries *FoldersLocationsRecentQueriesService + + SavedQueries *FoldersLocationsSavedQueriesService } func NewFoldersLocationsBucketsService(s *Service) *FoldersLocationsBucketsService { rs := &FoldersLocationsBucketsService{s: s} + rs.Links = NewFoldersLocationsBucketsLinksService(s) rs.Views = NewFoldersLocationsBucketsViewsService(s) return rs } @@ -391,16 +429,39 @@ func NewFoldersLocationsBucketsService(s *Service) *FoldersLocationsBucketsServi type FoldersLocationsBucketsService struct { s *Service + Links *FoldersLocationsBucketsLinksService + Views *FoldersLocationsBucketsViewsService 
} +func NewFoldersLocationsBucketsLinksService(s *Service) *FoldersLocationsBucketsLinksService { + rs := &FoldersLocationsBucketsLinksService{s: s} + return rs +} + +type FoldersLocationsBucketsLinksService struct { + s *Service +} + func NewFoldersLocationsBucketsViewsService(s *Service) *FoldersLocationsBucketsViewsService { rs := &FoldersLocationsBucketsViewsService{s: s} + rs.Logs = NewFoldersLocationsBucketsViewsLogsService(s) return rs } type FoldersLocationsBucketsViewsService struct { s *Service + + Logs *FoldersLocationsBucketsViewsLogsService +} + +func NewFoldersLocationsBucketsViewsLogsService(s *Service) *FoldersLocationsBucketsViewsLogsService { + rs := &FoldersLocationsBucketsViewsLogsService{s: s} + return rs +} + +type FoldersLocationsBucketsViewsLogsService struct { + s *Service } func NewFoldersLocationsOperationsService(s *Service) *FoldersLocationsOperationsService { @@ -412,6 +473,24 @@ type FoldersLocationsOperationsService struct { s *Service } +func NewFoldersLocationsRecentQueriesService(s *Service) *FoldersLocationsRecentQueriesService { + rs := &FoldersLocationsRecentQueriesService{s: s} + return rs +} + +type FoldersLocationsRecentQueriesService struct { + s *Service +} + +func NewFoldersLocationsSavedQueriesService(s *Service) *FoldersLocationsSavedQueriesService { + rs := &FoldersLocationsSavedQueriesService{s: s} + return rs +} + +type FoldersLocationsSavedQueriesService struct { + s *Service +} + func NewFoldersLogsService(s *Service) *FoldersLogsService { rs := &FoldersLogsService{s: s} return rs @@ -447,6 +526,7 @@ type LocationsService struct { func NewLocationsBucketsService(s *Service) *LocationsBucketsService { rs := &LocationsBucketsService{s: s} + rs.Links = NewLocationsBucketsLinksService(s) rs.Views = NewLocationsBucketsViewsService(s) return rs } @@ -454,9 +534,20 @@ func NewLocationsBucketsService(s *Service) *LocationsBucketsService { type LocationsBucketsService struct { s *Service + Links *LocationsBucketsLinksService + Views *LocationsBucketsViewsService } +func NewLocationsBucketsLinksService(s *Service) *LocationsBucketsLinksService { + rs := &LocationsBucketsLinksService{s: s} + return rs +} + +type LocationsBucketsLinksService struct { + s *Service +} + func NewLocationsBucketsViewsService(s *Service) *LocationsBucketsViewsService { rs := &LocationsBucketsViewsService{s: s} return rs @@ -527,6 +618,8 @@ func NewOrganizationsLocationsService(s *Service) *OrganizationsLocationsService rs := &OrganizationsLocationsService{s: s} rs.Buckets = NewOrganizationsLocationsBucketsService(s) rs.Operations = NewOrganizationsLocationsOperationsService(s) + rs.RecentQueries = NewOrganizationsLocationsRecentQueriesService(s) + rs.SavedQueries = NewOrganizationsLocationsSavedQueriesService(s) return rs } @@ -536,10 +629,15 @@ type OrganizationsLocationsService struct { Buckets *OrganizationsLocationsBucketsService Operations *OrganizationsLocationsOperationsService + + RecentQueries *OrganizationsLocationsRecentQueriesService + + SavedQueries *OrganizationsLocationsSavedQueriesService } func NewOrganizationsLocationsBucketsService(s *Service) *OrganizationsLocationsBucketsService { rs := &OrganizationsLocationsBucketsService{s: s} + rs.Links = NewOrganizationsLocationsBucketsLinksService(s) rs.Views = NewOrganizationsLocationsBucketsViewsService(s) return rs } @@ -547,16 +645,39 @@ func NewOrganizationsLocationsBucketsService(s *Service) *OrganizationsLocations type OrganizationsLocationsBucketsService struct { s *Service + Links 
*OrganizationsLocationsBucketsLinksService + Views *OrganizationsLocationsBucketsViewsService } +func NewOrganizationsLocationsBucketsLinksService(s *Service) *OrganizationsLocationsBucketsLinksService { + rs := &OrganizationsLocationsBucketsLinksService{s: s} + return rs +} + +type OrganizationsLocationsBucketsLinksService struct { + s *Service +} + func NewOrganizationsLocationsBucketsViewsService(s *Service) *OrganizationsLocationsBucketsViewsService { rs := &OrganizationsLocationsBucketsViewsService{s: s} + rs.Logs = NewOrganizationsLocationsBucketsViewsLogsService(s) return rs } type OrganizationsLocationsBucketsViewsService struct { s *Service + + Logs *OrganizationsLocationsBucketsViewsLogsService +} + +func NewOrganizationsLocationsBucketsViewsLogsService(s *Service) *OrganizationsLocationsBucketsViewsLogsService { + rs := &OrganizationsLocationsBucketsViewsLogsService{s: s} + return rs +} + +type OrganizationsLocationsBucketsViewsLogsService struct { + s *Service } func NewOrganizationsLocationsOperationsService(s *Service) *OrganizationsLocationsOperationsService { @@ -568,6 +689,24 @@ type OrganizationsLocationsOperationsService struct { s *Service } +func NewOrganizationsLocationsRecentQueriesService(s *Service) *OrganizationsLocationsRecentQueriesService { + rs := &OrganizationsLocationsRecentQueriesService{s: s} + return rs +} + +type OrganizationsLocationsRecentQueriesService struct { + s *Service +} + +func NewOrganizationsLocationsSavedQueriesService(s *Service) *OrganizationsLocationsSavedQueriesService { + rs := &OrganizationsLocationsSavedQueriesService{s: s} + return rs +} + +type OrganizationsLocationsSavedQueriesService struct { + s *Service +} + func NewOrganizationsLogsService(s *Service) *OrganizationsLogsService { rs := &OrganizationsLogsService{s: s} return rs @@ -623,6 +762,8 @@ func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} rs.Buckets = NewProjectsLocationsBucketsService(s) rs.Operations = NewProjectsLocationsOperationsService(s) + rs.RecentQueries = NewProjectsLocationsRecentQueriesService(s) + rs.SavedQueries = NewProjectsLocationsSavedQueriesService(s) return rs } @@ -632,10 +773,15 @@ type ProjectsLocationsService struct { Buckets *ProjectsLocationsBucketsService Operations *ProjectsLocationsOperationsService + + RecentQueries *ProjectsLocationsRecentQueriesService + + SavedQueries *ProjectsLocationsSavedQueriesService } func NewProjectsLocationsBucketsService(s *Service) *ProjectsLocationsBucketsService { rs := &ProjectsLocationsBucketsService{s: s} + rs.Links = NewProjectsLocationsBucketsLinksService(s) rs.Views = NewProjectsLocationsBucketsViewsService(s) return rs } @@ -643,16 +789,39 @@ func NewProjectsLocationsBucketsService(s *Service) *ProjectsLocationsBucketsSer type ProjectsLocationsBucketsService struct { s *Service + Links *ProjectsLocationsBucketsLinksService + Views *ProjectsLocationsBucketsViewsService } +func NewProjectsLocationsBucketsLinksService(s *Service) *ProjectsLocationsBucketsLinksService { + rs := &ProjectsLocationsBucketsLinksService{s: s} + return rs +} + +type ProjectsLocationsBucketsLinksService struct { + s *Service +} + func NewProjectsLocationsBucketsViewsService(s *Service) *ProjectsLocationsBucketsViewsService { rs := &ProjectsLocationsBucketsViewsService{s: s} + rs.Logs = NewProjectsLocationsBucketsViewsLogsService(s) return rs } type ProjectsLocationsBucketsViewsService struct { s *Service + + Logs *ProjectsLocationsBucketsViewsLogsService +} + +func 
NewProjectsLocationsBucketsViewsLogsService(s *Service) *ProjectsLocationsBucketsViewsLogsService {
+	rs := &ProjectsLocationsBucketsViewsLogsService{s: s}
+	return rs
+}
+
+type ProjectsLocationsBucketsViewsLogsService struct {
+	s *Service
 }
 
 func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService {
@@ -664,6 +833,24 @@ type ProjectsLocationsOperationsService struct {
 	s *Service
 }
 
+func NewProjectsLocationsRecentQueriesService(s *Service) *ProjectsLocationsRecentQueriesService {
+	rs := &ProjectsLocationsRecentQueriesService{s: s}
+	return rs
+}
+
+type ProjectsLocationsRecentQueriesService struct {
+	s *Service
+}
+
+func NewProjectsLocationsSavedQueriesService(s *Service) *ProjectsLocationsSavedQueriesService {
+	rs := &ProjectsLocationsSavedQueriesService{s: s}
+	return rs
+}
+
+type ProjectsLocationsSavedQueriesService struct {
+	s *Service
+}
+
 func NewProjectsLogsService(s *Service) *ProjectsLogsService {
 	rs := &ProjectsLogsService{s: s}
 	return rs
@@ -709,6 +896,40 @@ type V2Service struct {
 	s *Service
 }
 
+// BigQueryDataset: Describes a BigQuery dataset that was created by a
+// link.
+type BigQueryDataset struct {
+	// DatasetId: Output only. The full resource name of the BigQuery
+	// dataset. The DATASET_ID will match the ID of the link, so the link
+	// must match the naming restrictions of BigQuery datasets (alphanumeric
+	// characters and underscores only).The dataset will have a resource
+	// path of
+	// "bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID"
+	DatasetId string `json:"datasetId,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "DatasetId") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "DatasetId") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *BigQueryDataset) MarshalJSON() ([]byte, error) {
+	type NoMethod BigQueryDataset
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // BigQueryOptions: Options that change functionality of a sink
 // exporting data to BigQuery.
 type BigQueryOptions struct {
@@ -723,7 +944,7 @@ type BigQueryOptions struct {
 	UsePartitionedTables bool `json:"usePartitionedTables,omitempty"`
 
 	// UsesTimestampColumnPartitioning: Output only. True if new timestamp
 	// column based partitioning is in use, false if legacy ingestion-time
 	// partitioning is in use.All new sinks will have this field set true
 	// and will use timestamp column based partitioning. 
If // use_partitioned_tables is false, this value has no meaning and will @@ -756,6 +977,60 @@ func (s *BigQueryOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketMetadata: Metadata for LongRunningUpdateBucket Operations. +type BucketMetadata struct { + // CreateBucketRequest: LongRunningCreateBucket RPC request. + CreateBucketRequest *CreateBucketRequest `json:"createBucketRequest,omitempty"` + + // EndTime: The end time of an operation. + EndTime string `json:"endTime,omitempty"` + + // StartTime: The create time of an operation. + StartTime string `json:"startTime,omitempty"` + + // State: Output only. State of an operation. + // + // Possible values: + // "OPERATION_STATE_UNSPECIFIED" - Should not be used. + // "OPERATION_STATE_SCHEDULED" - The operation is scheduled. + // "OPERATION_STATE_WAITING_FOR_PERMISSIONS" - Waiting for necessary + // permissions. + // "OPERATION_STATE_RUNNING" - The operation is running. + // "OPERATION_STATE_SUCCEEDED" - The operation was completed + // successfully. + // "OPERATION_STATE_FAILED" - The operation failed. + // "OPERATION_STATE_CANCELLED" - The operation was cancelled by the + // user. + // "OPERATION_STATE_PENDING" - The operation is waiting for quota. + State string `json:"state,omitempty"` + + // UpdateBucketRequest: LongRunningUpdateBucket RPC request. + UpdateBucketRequest *UpdateBucketRequest `json:"updateBucketRequest,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateBucketRequest") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateBucketRequest") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketMetadata) MarshalJSON() ([]byte, error) { + type NoMethod BucketMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketOptions: BucketOptions describes the bucket boundaries used to // create a histogram for the distribution. The buckets can be in a // linear sequence, an exponential sequence, or each bucket can be @@ -819,14 +1094,14 @@ type CancelOperationRequest struct { // (https://cloud.google.com/logging/docs/routing/managed-encryption) // for more information. type CmekSettings struct { - // KmsKeyName: The resource name for the configured Cloud KMS key.KMS - // key name format: + // KmsKeyName: Optional. 
The resource name for the configured Cloud KMS
+	// key.KMS key name format:
 	// "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoK
 	// eys/[KEY]" For
 	// example:"projects/my-project/locations/us-central1/keyRings/my-ring/cr
 	// yptoKeys/my-key"To enable CMEK for the Log Router, set this field to
 	// a valid kms_key_name for which the associated service account has the
 	// required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the
 	// key.The Cloud KMS key used by the Log Router can be updated by
 	// changing the kms_key_name to a new valid key name or disabled by
 	// setting the key name to an empty string. Encryption operations that
 	// are in progress will be completed with the key that was in use when
 	// they started. Decryption operations will be completed using the key
 	// that was used at the time of encryption unless access to that key has
 	// been revoked.To disable CMEK for the Log Router, set this field to an
 	// empty string.See Enabling CMEK for Log Router
 	// (https://cloud.google.com/logging/docs/routing/managed-encryption)
 	// for more information.
 	KmsKeyName string `json:"kmsKeyName,omitempty"`
 
+	// KmsKeyVersionName: Output only. The CryptoKeyVersion resource name
+	// for the configured Cloud KMS key.KMS key name format:
+	// "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoK
+	// eys/[KEY]/cryptoKeyVersions/[VERSION]" For
+	// example:"projects/my-project/locations/us-central1/keyRings/my-ring/cr
+	// yptoKeys/my-key/cryptoKeyVersions/1"This is a read-only field used to
+	// convey the specific configured CryptoKeyVersion of kms_key that has
+	// been configured. It will be populated in cases where the CMEK
+	// settings are bound to a single key version.If this field is
+	// populated, the kms_key is tied to a specific CryptoKeyVersion.
+	KmsKeyVersionName string `json:"kmsKeyVersionName,omitempty"`
+
 	// Name: Output only. The resource name of the CMEK settings.
 	Name string `json:"name,omitempty"`
 
@@ -887,6 +1174,10 @@ type CopyLogEntriesMetadata struct {
 	// cancellation of the operation.
 	CancellationRequested bool `json:"cancellationRequested,omitempty"`
 
+	// Destination: Destination to which to copy log entries.For example, a
+	// Cloud Storage bucket:"storage.googleapis.com/my-cloud-storage-bucket"
+	Destination string `json:"destination,omitempty"`
+
 	// EndTime: The end time of an operation.
 	EndTime string `json:"endTime,omitempty"`
 
@@ -896,10 +1187,14 @@
 	// Request: CopyLogEntries RPC request.
 	Request *CopyLogEntriesRequest `json:"request,omitempty"`
 
+	// Source: Source from which to copy log entries.For example, a log
+	// bucket:"projects/my-project/locations/global/buckets/my-source-bucket"
+	Source string `json:"source,omitempty"`
+
 	// StartTime: The create time of an operation.
 	StartTime string `json:"startTime,omitempty"`
 
-	// State: State of an operation.
+	// State: Output only. State of an operation.
 	//
 	// Possible values:
 	//   "OPERATION_STATE_UNSPECIFIED" - Should not be used.
 	//   "OPERATION_STATE_SCHEDULED" - The operation is scheduled.
 	//   "OPERATION_STATE_WAITING_FOR_PERMISSIONS" - Waiting for necessary
 	// permissions.
 	//   "OPERATION_STATE_RUNNING" - The operation is running.
 	//   "OPERATION_STATE_SUCCEEDED" - The operation was completed
 	// successfully.
 	//   "OPERATION_STATE_FAILED" - The operation failed.
 	//   "OPERATION_STATE_CANCELLED" - The operation was cancelled by the
 	// user.
+	//   "OPERATION_STATE_PENDING" - The operation is waiting for quota. 
State string `json:"state,omitempty"` + // Verb: Name of the verb executed by the operation.For example,"copy" + Verb string `json:"verb,omitempty"` + // WriterIdentity: The IAM identity of a service account that must be // granted access to the destination.If the service account is not // granted permission to the destination within an hour, the operation @@ -1014,12 +1313,185 @@ func (s *CopyLogEntriesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CreateBucketRequest: The parameters to CreateBucket. +type CreateBucketRequest struct { + // Bucket: Required. The new bucket. The region specified in the new + // bucket must be compliant with any Location Restriction Org Policy. + // The name field in the bucket is ignored. + Bucket *LogBucket `json:"bucket,omitempty"` + + // BucketId: Required. A client-assigned identifier such as "my-bucket". + // Identifiers are limited to 100 characters and can include only + // letters, digits, underscores, hyphens, and periods. + BucketId string `json:"bucketId,omitempty"` + + // Parent: Required. The resource in which to create the log bucket: + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For + // example:"projects/my-project/locations/global" + Parent string `json:"parent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateBucketRequest) MarshalJSON() ([]byte, error) { + type NoMethod CreateBucketRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateLinkRequest: The parameters to CreateLink. +type CreateLinkRequest struct { + // Link: Required. The new link. + Link *Link `json:"link,omitempty"` + + // LinkId: Required. The ID to use for the link. The link_id can have up + // to 100 characters. A valid link_id must only have alphanumeric + // characters and underscores within it. + LinkId string `json:"linkId,omitempty"` + + // Parent: Required. The full resource name of the bucket to create a + // link for. + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCK + // ET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/ + // [BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + Parent string `json:"parent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Link") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
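CreateBucketRequest above is the payload of the long-running bucket RPCs; the plain create call takes the same pieces as separate arguments. A minimal sketch, assuming an authenticated *logging.Service as in the earlier examples; the parent and bucket ID are hypothetical:

	// createBucket creates a log bucket with a 30-day retention period.
	func createBucket(ctx context.Context, svc *logging.Service) error {
		bucket := &logging.LogBucket{
			Description:   "example bucket",
			RetentionDays: 30,
		}
		_, err := svc.Projects.Locations.Buckets.
			Create("projects/my-project/locations/global", bucket).
			BucketId("my-bucket"). // client-assigned identifier, as described above
			Context(ctx).Do()
		return err
	}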
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Link") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateLinkRequest) MarshalJSON() ([]byte, error) { + type NoMethod CreateLinkRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DefaultSinkConfig: Describes the custom _Default sink configuration +// that is used to override the built-in _Default sink configuration in +// newly created resource containers, such as projects or folders. +type DefaultSinkConfig struct { + // Exclusions: Optional. Specifies the set of exclusions to be added to + // the _Default sink in newly created resource containers. + Exclusions []*LogExclusion `json:"exclusions,omitempty"` + + // Filter: Optional. An advanced logs filter + // (https://cloud.google.com/logging/docs/view/advanced-queries). The + // only exported log entries are those that are in the resource owning + // the sink and that match the filter.For + // example:logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND + // severity>=ERRORTo match all logs, don't add exclusions and use the + // following line as the value of filter:logName:*Cannot be empty or + // unset when the value of mode is OVERWRITE. + Filter string `json:"filter,omitempty"` + + // Mode: Required. Determines the behavior to apply to the built-in + // _Default sink inclusion filter.Exclusions are always appended, as + // built-in _Default sinks have no exclusions. + // + // Possible values: + // "FILTER_WRITE_MODE_UNSPECIFIED" - The filter's write mode is + // unspecified. This mode must not be used. + // "APPEND" - The contents of filter will be appended to the built-in + // _Default sink filter. Using the append mode with an empty filter will + // keep the sink inclusion filter unchanged. + // "OVERWRITE" - The contents of filter will overwrite the built-in + // _Default sink filter. + Mode string `json:"mode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Exclusions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Exclusions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
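DefaultSinkConfig above is applied through the resource-container Settings. A sketch of appending a severity clause to the built-in _Default sink filter, assuming the UpdateSettings call on the top-level v2 service is available in this version of the package; the organization ID and filter are hypothetical:

	// appendToDefaultSink narrows the _Default sink in new child projects.
	func appendToDefaultSink(ctx context.Context, svc *logging.Service) error {
		settings := &logging.Settings{
			DefaultSinkConfig: &logging.DefaultSinkConfig{
				Mode:   "APPEND", // keep the built-in filter and add a clause
				Filter: "severity>=WARNING",
			},
		}
		_, err := svc.V2.UpdateSettings("organizations/123456789/settings", settings).
			UpdateMask("defaultSinkConfig").
			Context(ctx).Do()
		return err
	}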
+ NullFields []string `json:"-"` +} + +func (s *DefaultSinkConfig) MarshalJSON() ([]byte, error) { + type NoMethod DefaultSinkConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DeleteLinkRequest: The parameters to DeleteLink. +type DeleteLinkRequest struct { + // Name: Required. The full resource name of the link to delete. + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/lin + // ks/[LINK_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCK + // ET_ID]/links/[LINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/ + // [BUCKET_ID]/links/[LINK_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links + // /[LINK_ID]" + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DeleteLinkRequest) MarshalJSON() ([]byte, error) { + type NoMethod DeleteLinkRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For // instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for Empty is empty -// JSON object {}. +// (google.protobuf.Empty); } type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1060,12 +1532,29 @@ func (s *Explicit) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *Explicit) UnmarshalJSON(data []byte) error { + type NoMethod Explicit + var s1 struct { + Bounds []gensupport.JSONFloat64 `json:"bounds"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Bounds = make([]float64, len(s1.Bounds)) + for i := range s1.Bounds { + s.Bounds[i] = float64(s1.Bounds[i]) + } + return nil +} + // Exponential: Specifies an exponential sequence of buckets that have a // width that is proportional to the value of the lower bound. Each // bucket represents a constant relative uncertainty on a specific value // in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket // i has the following boundaries:Upper bound (0 <= i < N-1): scale * -// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor +// (growth_factor ^ i).Lower bound (1 <= i < N): scale * (growth_factor // ^ (i - 1)). 
type Exponential struct { // GrowthFactor: Must be greater than 1. @@ -1146,7 +1635,7 @@ type HttpRequest struct { // Referer: The referer URL of the request, as defined in HTTP/1.1 // Header Field Definitions - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). + // (https://datatracker.ietf.org/doc/html/rfc2616#section-14.36). Referer string `json:"referer,omitempty"` // RemoteIp: The IP address (IPv4 or IPv6) of the client that issued the @@ -1209,6 +1698,51 @@ func (s *HttpRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// IndexConfig: Configuration for an indexed field. +type IndexConfig struct { + // CreateTime: Output only. The timestamp when the index was last + // modified.This is used to return the timestamp, and will be ignored if + // supplied during update. + CreateTime string `json:"createTime,omitempty"` + + // FieldPath: Required. The LogEntry field path to index.Note that some + // paths are automatically indexed, and other paths are not eligible for + // indexing. See indexing documentation( + // https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) + // for details.For example: jsonPayload.request.status + FieldPath string `json:"fieldPath,omitempty"` + + // Type: Required. The type of data in this index. + // + // Possible values: + // "INDEX_TYPE_UNSPECIFIED" - The index's type is unspecified. + // "INDEX_TYPE_STRING" - The index is a string-type index. + // "INDEX_TYPE_INTEGER" - The index is a integer-type index. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *IndexConfig) MarshalJSON() ([]byte, error) { + type NoMethod IndexConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LabelDescriptor: A description of a label. type LabelDescriptor struct { // Description: A human-readable description for the label. @@ -1253,7 +1787,7 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { // constant absolute uncertainty on the specific value in the // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has // the following boundaries:Upper bound (0 <= i < N-1): offset + (width -// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). +// * i).Lower bound (1 <= i < N): offset + (width * (i - 1)). type Linear struct { // NumFiniteBuckets: Must be greater than 0. NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` @@ -1304,6 +1838,135 @@ func (s *Linear) UnmarshalJSON(data []byte) error { return nil } +// Link: Describes a link connected to an analytics enabled bucket. 
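The Exponential and Linear comments above define their buckets purely by formula, so the finite boundaries are easy to reproduce. A self-contained sketch of the exponential case (the function names are mine, not the library's):

    package main

    import (
        "fmt"
        "math"
    )

    // exponentialBounds lists the N-1 finite boundaries implied by the
    // Exponential doc: with N = numFiniteBuckets + 2 buckets, bucket i
    // (0 <= i < N-1) has upper bound scale * growthFactor^i.
    func exponentialBounds(numFiniteBuckets int, growthFactor, scale float64) []float64 {
        n := numFiniteBuckets + 2
        bounds := make([]float64, 0, n-1)
        for i := 0; i < n-1; i++ {
            bounds = append(bounds, scale*math.Pow(growthFactor, float64(i)))
        }
        return bounds
    }

    func main() {
        // numFiniteBuckets=2, growthFactor=2, scale=1 gives boundaries
        // [1 2 4], i.e. buckets (-inf,1) [1,2) [2,4) [4,+inf).
        fmt.Println(exponentialBounds(2, 2, 1))
    }
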
+type Link struct { + // BigqueryDataset: Optional. The information of a BigQuery Dataset. + // When a link is created, a BigQuery dataset is created along with it, + // in the same project as the LogBucket it's linked to. This dataset + // will also have BigQuery Views corresponding to the LogViews in the + // bucket. + BigqueryDataset *BigQueryDataset `json:"bigqueryDataset,omitempty"` + + // CreateTime: Output only. The creation timestamp of the link. + CreateTime string `json:"createTime,omitempty"` + + // Description: Optional. Describes this link.The maximum length of the + // description is 8000 characters. + Description string `json:"description,omitempty"` + + // LifecycleState: Output only. The resource lifecycle state. + // + // Possible values: + // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only + // used/useful for distinguishing unset values. + // "ACTIVE" - The normal and active state. + // "DELETE_REQUESTED" - The resource has been marked for deletion by + // the user. For some resources (e.g. buckets), this can be reversed by + // an un-delete operation. + // "UPDATING" - The resource has been marked for an update by the + // user. It will remain in this state until the update is complete. + // "CREATING" - The resource has been marked for creation by the user. + // It will remain in this state until the creation is complete. + // "FAILED" - The resource is in an INTERNAL error state. + LifecycleState string `json:"lifecycleState,omitempty"` + + // Name: Output only. The resource name of the link. The name can have + // up to 100 characters. A valid link id (at the end of the link name) + // must only have alphanumeric characters and underscores within it. + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/lin + // ks/[LINK_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCK + // ET_ID]/links/[LINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/ + // [BUCKET_ID]/links/[LINK_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/links + // /[LINK_ID]" For + // example:`projects/my-project/locations/global/buckets/my-bucket/links/ + // my_link + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BigqueryDataset") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BigqueryDataset") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Link) MarshalJSON() ([]byte, error) { + type NoMethod Link + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LinkMetadata: Metadata for long running Link operations. 
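Because the Name comment above pins down the resource-name shape, the link ID is always whatever follows the final "/links/" segment. A hypothetical convenience helper, not part of the generated client:

    import "strings"

    // linkID extracts [LINK_ID] from a Link resource name such as
    // "projects/my-project/locations/global/buckets/my-bucket/links/my_link".
    func linkID(name string) string {
        const sep = "/links/"
        i := strings.LastIndex(name, sep)
        if i < 0 {
            return "" // not a link resource name
        }
        return name[i+len(sep):]
    }
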
+type LinkMetadata struct { + // CreateLinkRequest: CreateLink RPC request. + CreateLinkRequest *CreateLinkRequest `json:"createLinkRequest,omitempty"` + + // DeleteLinkRequest: DeleteLink RPC request. + DeleteLinkRequest *DeleteLinkRequest `json:"deleteLinkRequest,omitempty"` + + // EndTime: The end time of an operation. + EndTime string `json:"endTime,omitempty"` + + // StartTime: The start time of an operation. + StartTime string `json:"startTime,omitempty"` + + // State: Output only. State of an operation. + // + // Possible values: + // "OPERATION_STATE_UNSPECIFIED" - Should not be used. + // "OPERATION_STATE_SCHEDULED" - The operation is scheduled. + // "OPERATION_STATE_WAITING_FOR_PERMISSIONS" - Waiting for necessary + // permissions. + // "OPERATION_STATE_RUNNING" - The operation is running. + // "OPERATION_STATE_SUCCEEDED" - The operation was completed + // successfully. + // "OPERATION_STATE_FAILED" - The operation failed. + // "OPERATION_STATE_CANCELLED" - The operation was cancelled by the + // user. + // "OPERATION_STATE_PENDING" - The operation is waiting for quota. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateLinkRequest") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateLinkRequest") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LinkMetadata) MarshalJSON() ([]byte, error) { + type NoMethod LinkMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListBucketsResponse: The response from ListBuckets. type ListBucketsResponse struct { // Buckets: A list of buckets. @@ -1380,6 +2043,44 @@ func (s *ListExclusionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListLinksResponse: The response from ListLinks. +type ListLinksResponse struct { + // Links: A list of links. + Links []*Link `json:"links,omitempty"` + + // NextPageToken: If there might be more results than those appearing in + // this response, then nextPageToken is included. To get the next set of + // results, call the same method again using the value of nextPageToken + // as pageToken. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Links") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
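The State enum above is what a caller polls while a CreateLink or DeleteLink operation runs. A hedged sketch; it assumes the package's generated Operation type exposes this metadata as a googleapi.RawMessage, which is the usual shape for these clients but is not shown in this hunk:

    // Decode op.Metadata (a googleapi.RawMessage) into LinkMetadata.
    var md logging.LinkMetadata
    if err := json.Unmarshal(op.Metadata, &md); err != nil {
        return err
    }
    switch md.State {
    case "OPERATION_STATE_SUCCEEDED":
        // the Link and its BigQuery dataset are ready to use
    case "OPERATION_STATE_FAILED", "OPERATION_STATE_CANCELLED":
        // give up; details are carried on the Operation's error field
    default:
        // scheduled, waiting for permissions or quota, or still running
    }
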
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Links") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListLinksResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListLinksResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListLocationsResponse: The response message for // Locations.ListLocations. type ListLocationsResponse struct { @@ -1420,13 +2121,13 @@ func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { // ListLogEntriesRequest: The parameters to ListLogEntries. type ListLogEntriesRequest struct { // Filter: Optional. A filter that chooses which log entries to return. - // See Advanced Logs Queries - // (https://cloud.google.com/logging/docs/view/advanced-queries). Only + // For more information, see Logging query language + // (https://cloud.google.com/logging/docs/view/logging-query-language).Only // log entries that match the filter are returned. An empty filter // matches all log entries in the resources listed in resource_names. // Referencing a parent resource that is not listed in resource_names - // will cause the filter to return no results. The maximum length of the - // filter is 20000 characters. + // will cause the filter to return no results. The maximum length of a + // filter is 20,000 characters. Filter string `json:"filter,omitempty"` // OrderBy: Optional. How the results should be sorted. Presently, the @@ -1439,9 +2140,9 @@ type ListLogEntriesRequest struct { OrderBy string `json:"orderBy,omitempty"` // PageSize: Optional. The maximum number of results to return from this - // request. Default is 50. If the value is negative or exceeds 1000, the - // request is rejected. The presence of next_page_token in the response - // indicates that more results might be available. + // request. Default is 50. If the value is negative, the request is + // rejected.The presence of next_page_token in the response indicates + // that more results might be available. PageSize int64 `json:"pageSize,omitempty"` // PageToken: Optional. If present, then retrieve the next batch of @@ -1468,7 +2169,8 @@ type ListLogEntriesRequest struct { // BUCKET_ID]/views/[VIEW_ID] // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]Projects listed in the project_ids field are added to this - // list. + // list. A maximum of 100 resources may be specified in a single + // request. ResourceNames []string `json:"resourceNames,omitempty"` // ForceSendFields is a list of field names (e.g. "Filter") to @@ -1695,6 +2397,106 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListRecentQueriesResponse: The response from ListRecentQueries. +type ListRecentQueriesResponse struct { + // NextPageToken: If there might be more results than appear in this + // response, then nextPageToken is included. To get the next set of + // results, call the same method again using the value of nextPageToken + // as pageToken. + NextPageToken string `json:"nextPageToken,omitempty"` + + // RecentQueries: A list of recent queries. 
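All of the List responses in this file share the nextPageToken contract described above: an empty token means the last page. A caller-side sketch for entries.list using the ListLogEntriesRequest fields documented above, assuming svc was built with logging.NewService(ctx):

    req := &logging.ListLogEntriesRequest{
        ResourceNames: []string{"projects/my-project"},
        Filter:        `severity>=ERROR`,
        PageSize:      50,
    }
    for {
        resp, err := svc.Entries.List(req).Do()
        if err != nil {
            return err
        }
        for _, entry := range resp.Entries {
            fmt.Println(entry.LogName)
        }
        if resp.NextPageToken == "" {
            break // no more pages
        }
        req.PageToken = resp.NextPageToken
    }
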
+ RecentQueries []*RecentQuery `json:"recentQueries,omitempty"` + + // Unreachable: The unreachable resources. Each resource can be either + // 1) a saved query if a specific query is unreachable or 2) a location + // if a specific location is unreachable. + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/recentQueries/[QUERY_ID + // ]" "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For + // example:"projects/my-project/locations/global/recentQueries/12345678" + // "projects/my-project/locations/global"If there are unreachable + // resources, the response will first return pages that contain recent + // queries, and then return pages that contain the unreachable + // resources. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListRecentQueriesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListRecentQueriesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListSavedQueriesResponse: The response from ListSavedQueries. +type ListSavedQueriesResponse struct { + // NextPageToken: If there might be more results than appear in this + // response, then nextPageToken is included. To get the next set of + // results, call the same method again using the value of nextPageToken + // as pageToken. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SavedQueries: A list of saved queries. + SavedQueries []*SavedQuery `json:"savedQueries,omitempty"` + + // Unreachable: The unreachable resources. It can be either 1) a saved + // query if a specific query is unreachable or 2) a location if a + // specific location is unreachabe. + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/savedQueries/[QUERY_ID] + // " "projects/[PROJECT_ID]/locations/[LOCATION_ID]" For example: + // "projects/my-project/locations/global/savedQueries/12345678" + // "projects/my-project/locations/global" If there are unreachable + // resources, the response will first return pages that contain saved + // queries, and then return pages that contain the unreachable + // resources. + Unreachable []string `json:"unreachable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListSavedQueriesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListSavedQueriesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListSinksResponse: Result returned from ListSinks. type ListSinksResponse struct { // NextPageToken: If there might be more results than appear in this @@ -1771,7 +2573,7 @@ func (s *ListViewsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -1821,22 +2623,60 @@ func (s *Location) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LocationMetadata: Cloud Logging specific location metadata. +type LocationMetadata struct { + // LogAnalyticsEnabled: Indicates whether or not Log Analytics features + // are supported in the given location. + LogAnalyticsEnabled bool `json:"logAnalyticsEnabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogAnalyticsEnabled") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LogAnalyticsEnabled") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LocationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod LocationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogBucket: Describes a repository in which log entries are stored. type LogBucket struct { - // CmekSettings: The CMEK settings of the log bucket. If present, new - // log entries written to this log bucket are encrypted using the CMEK - // key provided in this configuration. If a log bucket has CMEK - // settings, the CMEK settings cannot be disabled later by updating the - // log bucket. Changing the KMS key is allowed. + // AnalyticsEnabled: Optional. 
Whether log analytics is enabled for this + // bucket.Once enabled, log analytics features cannot be disabled. + AnalyticsEnabled bool `json:"analyticsEnabled,omitempty"` + + // CmekSettings: Optional. The CMEK settings of the log bucket. If + // present, new log entries written to this log bucket are encrypted + // using the CMEK key provided in this configuration. If a log bucket + // has CMEK settings, the CMEK settings cannot be disabled later by + // updating the log bucket. Changing the KMS key is allowed. CmekSettings *CmekSettings `json:"cmekSettings,omitempty"` // CreateTime: Output only. The creation timestamp of the bucket. This // is not set for any of the default buckets. CreateTime string `json:"createTime,omitempty"` - // Description: Describes this bucket. + // Description: Optional. Describes this bucket. Description string `json:"description,omitempty"` + // IndexConfigs: Optional. A list of indexed fields and related + // configuration data. + IndexConfigs []*IndexConfig `json:"indexConfigs,omitempty"` + // LifecycleState: Output only. The bucket lifecycle state. // // Possible values: @@ -1846,11 +2686,16 @@ type LogBucket struct { // "DELETE_REQUESTED" - The resource has been marked for deletion by // the user. For some resources (e.g. buckets), this can be reversed by // an un-delete operation. + // "UPDATING" - The resource has been marked for an update by the + // user. It will remain in this state until the update is complete. + // "CREATING" - The resource has been marked for creation by the user. + // It will remain in this state until the creation is complete. + // "FAILED" - The resource is in an INTERNAL error state. LifecycleState string `json:"lifecycleState,omitempty"` - // Locked: Whether the bucket is locked.The retention period on a locked - // bucket cannot be changed. Locked buckets may only be deleted if they - // are empty. + // Locked: Optional. Whether the bucket is locked.The retention period + // on a locked bucket cannot be changed. Locked buckets may only be + // deleted if they are empty. Locked bool `json:"locked,omitempty"` // Name: Output only. The resource name of the bucket.For @@ -1862,18 +2707,18 @@ type LogBucket struct { // changed. Name string `json:"name,omitempty"` - // RestrictedFields: Log entry field paths that are denied access in - // this bucket.The following fields and their children are eligible: - // textPayload, jsonPayload, protoPayload, httpRequest, labels, - // sourceLocation.Restricting a repeated field will restrict all values. - // Adding a parent will block all child fields. (e.g. foo.bar will block - // foo.bar.baz) + // RestrictedFields: Optional. Log entry field paths that are denied + // access in this bucket.The following fields and their children are + // eligible: textPayload, jsonPayload, protoPayload, httpRequest, + // labels, sourceLocation.Restricting a repeated field will restrict all + // values. Adding a parent will block all child fields. (e.g. foo.bar + // will block foo.bar.baz) RestrictedFields []string `json:"restrictedFields,omitempty"` - // RetentionDays: Logs will be retained by default for this amount of - // time, after which they will automatically be deleted. The minimum - // retention period is 1 day. If this value is set to zero at bucket - // creation time, the default time of 30 days will be used. + // RetentionDays: Optional. Logs will be retained by default for this + // amount of time, after which they will automatically be deleted. The + // minimum retention period is 1 day. 
If this value is set to zero at + // bucket creation time, the default time of 30 days will be used. RetentionDays int64 `json:"retentionDays,omitempty"` // UpdateTime: Output only. The last update timestamp of the bucket. @@ -1883,7 +2728,7 @@ type LogBucket struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CmekSettings") to + // ForceSendFields is a list of field names (e.g. "AnalyticsEnabled") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1891,12 +2736,13 @@ type LogBucket struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CmekSettings") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AnalyticsEnabled") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -1908,6 +2754,16 @@ func (s *LogBucket) MarshalJSON() ([]byte, error) { // LogEntry: An individual entry in a log. type LogEntry struct { + // ErrorGroups: Output only. The Error Reporting + // (https://cloud.google.com/error-reporting) error groups associated + // with this LogEntry. Error Reporting sets the values for this field + // during error group creation.For more information, see View error + // details( + // https://cloud.google.com/error-reporting/docs/viewing-errors#view_error_details)This + // field isn't available during log routing + // (https://cloud.google.com/logging/docs/routing/overview) + ErrorGroups []*LogErrorGroup `json:"errorGroups,omitempty"` + // HttpRequest: Optional. Information about the HTTP request associated // with this log entry, if applicable. HttpRequest *HttpRequest `json:"httpRequest,omitempty"` @@ -1953,7 +2809,7 @@ type LogEntry struct { // include the following characters: upper and lower case alphanumeric // characters, forward-slash, underscore, hyphen, and period.For // backward compatibility, if log_name begins with a forward-slash, such - // as /projects/..., then the log entry is ingested as usual, but the + // as /projects/..., then the log entry is processed as usual, but the // forward-slash is removed. Listing the log entry will not show the // leading slash and filtering for a log name with a leading slash will // never return any results. @@ -2007,10 +2863,28 @@ type LogEntry struct { // with the log entry, if any. SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"` - // SpanId: Optional. The span ID within the trace associated with the - // log entry.For Trace spans, this is the same format that the Trace API - // v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such - // as 000000000000004a. 
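Stepping back to the LogBucket changes above: AnalyticsEnabled is a one-way switch, a RetentionDays of zero falls back to the 30-day default, and IndexConfigs is new in this version. A hedged construction sketch using those fields:

    bucket := &logging.LogBucket{
        // One-way: once analytics is enabled it cannot be disabled.
        AnalyticsEnabled: true,
        RetentionDays:    90, // 0 would mean the 30-day default
        IndexConfigs: []*logging.IndexConfig{{
            FieldPath: "jsonPayload.request.status",
            Type:      "INDEX_TYPE_STRING",
        }},
    }
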
+ // SpanId: Optional. The ID of the Cloud Trace + // (https://cloud.google.com/trace) span associated with the current + // operation in which the log is being written. For example, if a span + // has the REST resource name of + // "projects/some-project/traces/some-trace/spans/some-span-id", then + // the span_id field is "some-span-id".A Span + // (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) + // represents a single operation within a trace. Whereas a trace may + // involve multiple different microservices running on multiple + // different machines, a span generally corresponds to a single logical + // operation being performed in a single instance of a microservice on + // one specific machine. Spans are the nodes within the tree that is a + // trace.Applications that are instrumented for tracing + // (https://cloud.google.com/trace/docs/setup) will generally assign a + // new, unique span ID on each incoming request. It is also common to + // create and record additional spans corresponding to internal + // processing elements as well as issuing requests to dependencies.The + // span ID is expected to be a 16-character, hexadecimal encoding of an + // 8-byte array and should not be zero. It should be unique within the + // trace and should, ideally, be generated in a manner that is uniformly + // random.Example values: 000000000000004a 7a2190356c3fc94b + // 0000f00300090021 d39223e101960076 SpanId string `json:"spanId,omitempty"` // Split: Optional. Information indicating this LogEntry is part of a @@ -2030,25 +2904,29 @@ type LogEntry struct { // must have timestamps that don't exceed the logs retention period // (https://cloud.google.com/logging/quotas#logs_retention_periods) in // the past, and that don't exceed 24 hours in the future. Log entries - // outside those time boundaries aren't ingested by Logging. + // outside those time boundaries are rejected by Logging. Timestamp string `json:"timestamp,omitempty"` - // Trace: Optional. Resource name of the trace associated with the log - // entry, if any. If it contains a relative resource name, the name is - // assumed to be relative to //tracing.googleapis.com. Example: - // projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824 + // Trace: Optional. The REST resource name of the trace being written to + // Cloud Trace (https://cloud.google.com/trace) in association with this + // log entry. For example, if your trace data is stored in the Cloud + // project "my-trace-project" and if the service that is creating the + // log entry receives a trace header that includes the trace ID "12345", + // then the service should use + // "projects/my-trace-project/traces/12345".The trace field provides the + // link between logs and traces. By using this field, you can navigate + // from a log entry to a trace. Trace string `json:"trace,omitempty"` - // TraceSampled: Optional. The sampling decision of the trace associated - // with the log entry.True means that the trace resource name in the - // trace field was sampled for storage in a trace backend. False means - // that the trace was not sampled for storage when this log entry was - // written, or the sampling decision was unknown at the time. A + // TraceSampled: Optional. The sampling decision of the span associated + // with the log entry at the time the log entry was created. This field + // corresponds to the sampled flag in the W3C trace-context + // specification (https://www.w3.org/TR/trace-context/#sampled-flag). 
A // non-sampled trace value is still useful as a request correlation // identifier. The default is False. TraceSampled bool `json:"traceSampled,omitempty"` - // ForceSendFields is a list of field names (e.g. "HttpRequest") to + // ForceSendFields is a list of field names (e.g. "ErrorGroups") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2056,7 +2934,7 @@ type LogEntry struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HttpRequest") to include + // NullFields is a list of field names (e.g. "ErrorGroups") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -2156,6 +3034,41 @@ func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LogErrorGroup: Contains metadata that associates the LogEntry to +// Error Reporting error groups. +type LogErrorGroup struct { + // Id: The id is a unique identifier for a particular error group; it is + // the last part of the error group resource name: + // /project/[PROJECT_ID]/errors/[ERROR_GROUP_ID]. Example: + // COShysOX0r_51QE. The id is derived from key parts of the error-log + // content and is treated as Service Data. For information about how + // Service Data is handled, see Google Cloud Privacy Notice + // (https://cloud.google.com/terms/cloud-privacy-notice). + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogErrorGroup) MarshalJSON() ([]byte, error) { + type NoMethod LogErrorGroup + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogExclusion: Specifies a set of log entries that are filtered out by // a sink. If your Google Cloud resource receives a large volume of log // entries, you can use exclusions to reduce your chargeable logs. Note @@ -2185,7 +3098,7 @@ type LogExclusion struct { // severity 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. 
+ c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. 
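handleHTTPMiddleware and executeRequestSafelyMiddleware above follow Go's standard middleware shape: take the next http.Handler, return a wrapped one. Note that the recovery middleware type-asserts its ResponseWriter to *aeContext, so it has to run inside handleHTTPMiddleware, which is what supplies that writer. A generic, simplified sketch of the shape, not the package's code:

    func withRecovery(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            defer func() {
                if x := recover(); x != nil {
                    // log x here, then respond 500 as the middleware above does
                    http.Error(w, "internal error", http.StatusInternalServerError)
                }
            }()
            next.ServeHTTP(w, r)
        })
    }
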
-func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/api_classic.go b/event-exporter/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e3..87c33c798 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
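The flushLog/logFlusher pair earlier in this file is a common daemon shape: flush on a ticker, force a flush when too long has passed since the last successful one, and flush once more on shutdown. A generic sketch; the interval values are assumptions standing in for the package's flushInterval and forceFlushInterval constants:

    func flushDaemon(stop <-chan struct{}, flush func(force bool) bool) {
        const (
            flushInterval      = 1 * time.Second  // assumed
            forceFlushInterval = 60 * time.Second // matches the constant above
        )
        lastFlush := time.Now()
        tick := time.NewTicker(flushInterval)
        defer tick.Stop()
        for {
            select {
            case <-stop:
                flush(true) // final forced flush on shutdown
                return
            case <-tick.C:
                force := time.Since(lastFlush) > forceFlushInterval
                if flush(force) {
                    lastFlush = time.Now()
                }
            }
        }
    }
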
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/api_common.go b/event-exporter/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b..5b95c13d9 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/api_common.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, 
method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
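WithCallOverride and callOverrideFromContext above implement a stack in context values: each push copies the slice, and each pop stores a shortened slice in a derived context, so a delegating override falls through to the one registered before it. Sketched with hypothetical overrides:

    // tracing and caching are hypothetical CallOverrideFuncs.
    ctx = WithCallOverride(ctx, tracing)
    ctx = WithCallOverride(ctx, caching)
    // Lookups pop in LIFO order: callOverrideFromContext(ctx) yields
    // caching plus a context whose remaining stack is [tracing], which
    // caching can pass back into Call to delegate downward.
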
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/identity.go b/event-exporter/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e42..0f95aa91d 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/identity.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/identity_classic.go b/event-exporter/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e..5ad3548bf 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/identity_flex.go b/event-exporter/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e..4201b6b58 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/identity_vm.go b/event-exporter/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d8067263..18ddda3a4 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/main.go b/event-exporter/vendor/google.golang.org/appengine/internal/main.go index 1e765312f..afd0ae84f 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/main.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/main_vm.go b/event-exporter/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333..86a8caf06 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
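A change repeated across these platform-specific files is the build-constraint header: the patch adds the `//go:build` form introduced in Go 1.17 above the legacy `// +build` comment, keeping both so older toolchains still select the correct file variant. A sketch of the resulting header on a placeholder file; only the two constraint lines are the actual convention, the rest is illustrative.

```go
//go:build !appengine
// +build !appengine

// Exactly one platform variant of a file like this is compiled in; the
// constraint above excludes this variant when the "appengine" build tag
// is set. gofmt keeps the two constraint lines in sync automatically.
package internal

const secondGen = true // placeholder body
```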
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/event-exporter/vendor/google.golang.org/appengine/internal/transaction.go b/event-exporter/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae653..2ae8ab9fa 100644 --- a/event-exporter/vendor/google.golang.org/appengine/internal/transaction.go +++ b/event-exporter/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/event-exporter/vendor/google.golang.org/appengine/namespace.go b/event-exporter/vendor/google.golang.org/appengine/namespace.go index 21860ca08..6f169be48 100644 --- a/event-exporter/vendor/google.golang.org/appengine/namespace.go +++ b/event-exporter/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/event-exporter/vendor/google.golang.org/appengine/timeout.go b/event-exporter/vendor/google.golang.org/appengine/timeout.go index 05642a992..fcf3ad0a5 100644 --- a/event-exporter/vendor/google.golang.org/appengine/timeout.go +++ b/event-exporter/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/event-exporter/vendor/google.golang.org/appengine/travis_install.sh b/event-exporter/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46..000000000 --- a/event-exporter/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . 
-else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/event-exporter/vendor/google.golang.org/appengine/travis_test.sh b/event-exporter/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045..000000000 --- a/event-exporter/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/event-exporter/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/event-exporter/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d9..6c0d72418 100644 --- a/event-exporter/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/event-exporter/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 125880315..cc5d52fbc 100644 --- a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/code.proto package code @@ -37,7 +37,6 @@ const ( // The canonical error codes for gRPC APIs. // -// // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. @@ -45,7 +44,7 @@ const ( type Code int32 const ( - // Not an error; returned on success + // Not an error; returned on success. // // HTTP Mapping: 200 OK Code_OK Code = 0 @@ -79,7 +78,7 @@ const ( // Some requested entity (e.g., file or directory) was not found. // // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, + // of users, such as gradual feature rollout or undocumented allowlist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. @@ -119,15 +118,16 @@ const ( // // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. 
// // HTTP Mapping: 400 Bad Request Code_FAILED_PRECONDITION Code = 9 diff --git a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 1c7b93ec1..7bd161e48 100644 --- a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/error_details.proto package errdetails @@ -36,6 +36,112 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. 
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. @@ -61,7 +167,7 @@ type RetryInfo struct { func (x *RetryInfo) Reset() { *x = RetryInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -74,7 +180,7 @@ func (x *RetryInfo) String() string { func (*RetryInfo) ProtoMessage() {} func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -87,7 +193,7 @@ func (x *RetryInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} } func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { @@ -112,7 +218,7 @@ type DebugInfo struct { func (x *DebugInfo) Reset() { *x = DebugInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +231,7 @@ func (x *DebugInfo) String() string { func (*DebugInfo) ProtoMessage() {} func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +244,7 @@ func (x *DebugInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. 
func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} } func (x *DebugInfo) GetStackEntries() []string { @@ -178,7 +284,7 @@ type QuotaFailure struct { func (x *QuotaFailure) Reset() { *x = QuotaFailure{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -191,7 +297,7 @@ func (x *QuotaFailure) String() string { func (*QuotaFailure) ProtoMessage() {} func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -204,7 +310,7 @@ func (x *QuotaFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} } func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { @@ -214,111 +320,6 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { return nil } -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. 
For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - // Describes what preconditions have failed. // // For example, if an RPC failed because it required the Terms of Service to be @@ -495,7 +496,8 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). 
// For example, "user:" or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e..a6b508188 100644 --- a/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/event-exporter/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. 
Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/event-exporter/vendor/google.golang.org/grpc/CONTRIBUTING.md b/event-exporter/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..608aa6e1a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/event-exporter/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use @@ -53,9 +66,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 
- - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/event-exporter/vendor/google.golang.org/grpc/README.md b/event-exporter/vendor/google.golang.org/grpc/README.md index 0e6ae69a5..ab0fbb79b 100644 --- a/event-exporter/vendor/google.golang.org/grpc/README.md +++ b/event-exporter/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. 
*.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/event-exporter/vendor/google.golang.org/grpc/attributes/attributes.go b/event-exporter/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac1..52d530d7a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/event-exporter/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,36 +19,41 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. 
-func (a *Attributes) WithValue(key, value interface{}) *Attributes {
+func (a *Attributes) WithValue(key, value any) *Attributes {
 	if a == nil {
 		return New(key, value)
 	}
-	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
+	n := &Attributes{m: make(map[any]any, len(a.m)+1)}
 	for k, v := range a.m {
 		n.m[k] = v
 	}
@@ -58,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes {
 
 // Value returns the value associated with these attributes for key, or nil if
 // no value is associated with key. The returned value should not be modified.
-func (a *Attributes) Value(key interface{}) interface{} {
+func (a *Attributes) Value(key any) any {
 	if a == nil {
 		return nil
 	}
 	return a.m[key]
 }
 
-// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
-// bool' is implemented for a value in the attributes, it is called to
-// determine if the value matches the one stored in the other attributes. If
-// Equal is not implemented, standard equality is used to determine if the two
-// values are equal. Note that some types (e.g. maps) aren't comparable by
-// default, so they must be wrapped in a struct, or in an alias type, with Equal
-// defined.
+// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes. If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
 func (a *Attributes) Equal(o *Attributes) bool {
 	if a == nil && o == nil {
 		return true
@@ -88,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool {
 			// o missing element of a
 			return false
 		}
-		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
+		if eq, ok := v.(interface{ Equal(o any) bool }); ok {
 			if !eq.Equal(ov) {
 				return false
 			}
@@ -99,3 +103,39 @@ func (a *Attributes) Equal(o *Attributes) bool {
 	}
 	return true
 }
+
+// String prints the attribute map. If any key or value throughout the map
+// implements fmt.Stringer, it calls that method and appends.
+func (a *Attributes) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	first := true
+	for k, v := range a.m {
+		if !first {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+		first = false
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+func str(x any) (s string) {
+	if v, ok := x.(fmt.Stringer); ok {
+		return fmt.Sprint(v)
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and this
+// method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
diff --git a/event-exporter/vendor/google.golang.org/grpc/backoff.go b/event-exporter/vendor/google.golang.org/grpc/backoff.go
index 542594f5c..29475e31c 100644
--- a/event-exporter/vendor/google.golang.org/grpc/backoff.go
+++ b/event-exporter/vendor/google.golang.org/grpc/backoff.go
@@ -48,7 +48,7 @@ type BackoffConfig struct {
 // here for more details:
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer/balancer.go b/event-exporter/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451..d79560a2e 100644 --- a/event-exporter/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/event-exporter/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,8 +27,10 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -38,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -50,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -69,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -104,11 +120,23 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. 
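The `Register`/`Get` hunks above only log a warning for mixed-case names for now: the registry is still indexed through `strings.ToLower`, but the TODOs signal a move to case-sensitive lookup. A hedged sketch of a registration that stays quiet under both regimes; the no-op builder is hypothetical and its `Build` is deliberately unimplemented.

```go
package main

import (
	"google.golang.org/grpc/balancer"
)

type noopBuilder struct{}

// Build would normally construct the LB policy; it is stubbed here
// because only the registration pattern is being illustrated.
func (noopBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	panic("illustration only")
}

// Name is all-lowercase, so Register logs no warning today and keeps
// working unchanged once the registry becomes case-sensitive.
func (noopBuilder) Name() string { return "noop_example" }

func init() {
	balancer.Register(noopBuilder{})
}

func main() {}
```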
@@ -123,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -144,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -192,7 +233,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. @@ -243,8 +284,8 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. - ServerLoad interface{} + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any } var ( @@ -273,6 +314,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -329,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. 
The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -371,55 +424,19 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numTransientFailure > 0 { - return connectivity.TransientFailure - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure -} +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. 
+type Producer any diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer/base/balancer.go b/event-exporter/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3a..a7f1eeec8 100644 --- a/event-exporter/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/event-exporter/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -104,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -120,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -134,6 +140,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -153,8 +162,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) @@ -173,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -200,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -222,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/event-exporter/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000..c33413581 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records a state change happening in subConn and based on that +// it evaluates what the aggregated state should be. +// +// - If at least one SubConn is Ready, the aggregated state is Ready; +// - Else if at least one SubConn is Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters.
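+ // Note on the arithmetic below: updateVal relies on uint64 wraparound. + // For idx 0 (the old state), 2*0-1 wraps to the all-ones value, which + // behaves as -1 under modular addition; for idx 1 (the new state) it is + // +1. Each counter is therefore decremented for the state being left and + // incremented for the state being entered.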
+ for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/event-exporter/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f85..f7031ad22 100644 --- a/event-exporter/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/event-exporter/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/event-exporter/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index f4ea61746..000000000 --- a/event-exporter/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,292 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "fmt" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" -) - -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} -} - -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { - ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), - } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) - return ccb -} - -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch u := t.(type) { - case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - return - } - } -} - -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() -} - -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false - } - ccb.updateCh.Put(exitIdle{}) - return true -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, - }) -} - -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) -} - -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - ccb.balancer.ResolverError(err) -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } - ac, err := ccb.cc.newAddrConn(addrs, opts) - if err != nil { - return nil, err - } - acbw := &acBalancerWrapper{ac: ac} - acbw.ac.mu.Lock() - ac.acbw = acbw - acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) -} - -func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - acbw.UpdateAddresses(addrs) -} - -func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) - ccb.cc.csMgr.updateState(s.ConnectivityState) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - ccb.cc.resolveNow(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. 
-type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } - - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } -} - -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - go acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} diff --git a/event-exporter/vendor/google.golang.org/grpc/balancer_wrapper.go b/event-exporter/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 000000000..b5e30cff0 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,380 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err + }) + if !ok { + return nil + } + return <-errCh +} + +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ResolverError(err) + }) +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. 
+// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine whether the wrapper has finished shutting +// down, callers should block on ccb.serializer.Done() without cc.mu held. +func (ccb *ccBalancerWrapper) close() { + ccb.mu.Lock() + ccb.closed = true + ccb.mu.Unlock() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { + return + } + ccb.balancer.Close() + ccb.balancer = nil + }) + ccb.serializerCancel() +} + +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") + } + ccb.mu.Unlock() + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) + if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } + ac.acbw = acbw + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated.
+ + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. 
+ pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/event-exporter/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/event-exporter/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cd..e9e97d451 100644 --- a/event-exporter/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/event-exporter/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.31.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -434,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // <host> or <host>:<port>. Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// // Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because diff --git a/event-exporter/vendor/google.golang.org/grpc/call.go b/event-exporter/vendor/google.golang.org/grpc/call.go index 9e20e4d38..788c89c16 100644 --- a/event-exporter/vendor/google.golang.org/grpc/call.go +++ b/event-exporter/vendor/google.golang.org/grpc/call.go @@ -26,7 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/event-exporter/vendor/google.golang.org/grpc/channelz/channelz.go b/event-exporter/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..32b7fa579 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
+type Identifier = channelz.Identifier diff --git a/event-exporter/vendor/google.golang.org/grpc/clientconn.go b/event-exporter/vendor/google.golang.org/grpc/clientconn.go index 28f09dc87..f6e815e6b 100644 --- a/event-exporter/vendor/google.golang.org/grpc/clientconn.go +++ b/event-exporter/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -34,10 +33,11 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -46,16 +46,14 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -69,6 +67,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates that the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -79,7 +80,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -116,6 +117,80 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } +// newClient returns a new client in idle mode.
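+// It applies global and supplied dial options, validates the configured +// transport credentials, registers the ClientConn with channelz, and +// determines the resolver and authority to use; no connection is attempted +// until the channel exits idle mode.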
+func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + // Apply dial options. + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking @@ -133,25 +208,21 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // https://github.com/grpc/grpc/blob/master/doc/naming.md. // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } - cc.retryThrottler.Store((*retryThrottler)(nil)) - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - - for _, opt := range opts { - opt.apply(&cc.dopts) + cc, err := newClient(target, opts...) 
+ if err != nil { + return nil, err } - chainUnaryClientInterceptors(cc) - chainStreamClientInterceptors(cc) + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. defer func() { if err != nil { @@ -159,58 +230,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") - } - cc.csMgr.channelzID = cc.channelzID - } - - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } - } - - if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if scpr.Err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) - } - cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err } - cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -233,101 +260,173 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true + // A blocking dial blocks until the clientConn is ready. 
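+ // The loop below implements the blocking dial: it kicks an idle channel + // via Connect, returns once the channel is Ready, surfaces non-temporary + // dial errors when FailOnNonTempDialError is set, and otherwise parks in + // WaitForStateChange until the state changes or ctx expires.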
+ for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } } - default: } - } - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { - return nil, err - } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) - if err != nil { - return nil, err - } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - - if cc.dopts.scChan != nil && !scSet { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err } - case <-ctx.Done(): return nil, ctx.Err() } } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } +} - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing } + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { + return err } + + cc.addTraceEvent("exiting idle mode") + return nil +} + +// initIdleStateLocked initializes common state to how it should be while idle. 
+func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + + if cc.conns == nil { + cc.mu.Unlock() + return + } + + conns := cc.conns + + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() + cc.csMgr.updateState(connectivity.Idle) + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + + cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
+// A channelz trace event is emitted for ClientConn creation. If the newly
+// created ClientConn is a nested one, i.e. a valid parent ClientConn ID is
+// specified via a dial option, the trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
+	cc.addTraceEvent("created")
 }
 
 // chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -344,7 +443,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) {
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
 			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
 		}
 	}
@@ -356,7 +455,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final
 	if curr == len(interceptors)-1 {
 		return finalInvoker
 	}
-	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
 	}
 }
@@ -392,13 +491,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
 	}
 }
 
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified id.
+func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelzID: id,
+		pubSub:     grpcsync.NewPubSub(ctx),
+	}
+}
+
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
 type connectivityStateManager struct {
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
-	channelzID int64
+	channelzID *channelz.Identifier
+	pubSub     *grpcsync.PubSub
 }
 
 // updateState updates the connectivity.State of ClientConn.
@@ -414,6 +527,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
 		return
 	}
 	csm.state = state
+	csm.pubSub.Publish(state)
+
 	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.
@@ -443,7 +558,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
 type ClientConnInterface interface {
 	// Invoke performs a unary RPC and returns after the response is received
 	// into reply.
-	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+	Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
 
 	// NewStream begins a streaming RPC.
 	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
 }
 
@@ -464,35 +579,41 @@ var _ ClientConnInterface = (*ClientConn)(nil)
 // handshakes. It also handles errors on established connections by
 // re-resolving the name and reconnecting.
 type ClientConn struct {
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	target       string
-	parsedTarget resolver.Target
-	authority    string
-	dopts        dialOptions
-	csMgr        *connectivityStateManager
-
-	balancerBuildOpts balancer.BuildOptions
-	blockingpicker    *pickerWrapper
-
+	ctx    context.Context    // Initialized using the background context at dial time.
+	cancel context.CancelFunc // Cancelled on close.
+
+	// The following are initialized at dial time, and are read-only after that.
+	target          string               // User's dial target.
+	parsedTarget    resolver.Target      // See parseTargetAndFindResolver().
+	authority       string               // See determineAuthority().
+	dopts           dialOptions          // Default and user specified dial options.
+	channelzID      *channelz.Identifier // Channelz identifier for the channel.
+	resolverBuilder resolver.Builder     // See parseTargetAndFindResolver().
+	idlenessMgr     *idle.Manager
+
+	// The following provide their own synchronization, and therefore don't
+	// require cc.mu to be held to access them.
+	csMgr              *connectivityStateManager
+	pickerWrapper      *pickerWrapper
 	safeConfigSelector iresolver.SafeConfigSelector
+	czData             *channelzData
+	retryThrottler     atomic.Value // Updated from service config.
 
+	// mu protects the following fields.
+	// TODO: split mu so the same mutex isn't used for everything.
 	mu              sync.RWMutex
-	resolverWrapper *ccResolverWrapper
-	sc              *ServiceConfig
-	conns           map[*addrConn]struct{}
-	// Keepalive parameter can be updated if a GoAway is received.
-	mkp             keepalive.ClientParameters
-	curBalancerName string
-	balancerWrapper *ccBalancerWrapper
-	retryThrottler  atomic.Value
-
+	resolverWrapper *ccResolverWrapper         // Always recreated whenever entering idle to simplify Close.
+	balancerWrapper *ccBalancerWrapper         // Always recreated whenever entering idle to simplify Close.
+	sc              *ServiceConfig             // Latest service config received from the resolver.
+	conns           map[*addrConn]struct{}     // Set to nil on close.
+	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event. May be accessed without mu
+	// if we know we cannot be asked to enter idle mode while accessing it (e.g.
+	// when the idle manager has already been closed, or if we are already
+	// entering idle mode).
 	firstResolveEvent *grpcsync.Event
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
-
 	lceMu               sync.Mutex // protects lastConnectionError
 	lastConnectionError error
 }
@@ -500,7 +621,7 @@ type ClientConn struct {
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in former case and false in latter.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -519,7 +640,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
 
 // GetState returns the connectivity.State of ClientConn.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
 // release.
@@ -531,38 +652,20 @@ func (cc *ClientConn) GetState() connectivity.State {
 // the channel is idle. Does not wait for the connection attempts to begin
 // before returning.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
 // release.
 func (cc *ClientConn) Connect() {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		cc.addTraceEvent(err.Error())
 		return
 	}
-	for ac := range cc.conns {
-		go ac.connect()
-	}
-}
-
-func (cc *ClientConn) scWatcher() {
-	for {
-		select {
-		case sc, ok := <-cc.dopts.scChan:
-			if !ok {
-				return
-			}
-			cc.mu.Lock()
-			// TODO: load balance policy runtime change is ignored.
-			// We may revisit this decision in the future.
-			cc.sc = &sc
-			cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
-			cc.mu.Unlock()
-		case <-cc.ctx.Done():
-			return
-		}
-	}
+	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
+	// LB policy so that connections can be created.
+	cc.mu.Lock()
+	cc.balancerWrapper.exitIdle()
+	cc.mu.Unlock()
 }
 
 // waitForResolvedAddrs blocks until the resolver has provided addresses or the
@@ -592,6 +695,16 @@ func init() {
 		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
 	}
 	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+
+	internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
+		return cc.csMgr.pubSub.Subscribe(s)
+	}
+	internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+		cc.idlenessMgr.EnterIdleModeForTesting()
+	}
+	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+		return cc.idlenessMgr.ExitIdleMode()
+	}
 }
 
 func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
@@ -606,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
 	}
 }
 
-func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
 	defer cc.firstResolveEvent.Fire()
-	cc.mu.Lock()
 	// Check if the ClientConn is already closed. Some fields (e.g.
 	// balancerWrapper) are set to nil when closing the ClientConn, and could
 	// cause nil pointer panic if we don't have this check.
@@ -623,9 +735,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 		// with the new addresses.
 		cc.maybeApplyDefaultServiceConfig(nil)
 
-		if cc.balancerWrapper != nil {
-			cc.balancerWrapper.resolverError(err)
-		}
+		cc.balancerWrapper.resolverError(err)
 
 		// No addresses are valid with err set; return early.
 		cc.mu.Unlock()
@@ -653,16 +763,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 		cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
 	} else {
 		ret = balancer.ErrBadResolverState
-		if cc.balancerWrapper == nil {
-			var err error
-			if s.ServiceConfig.Err != nil {
-				err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
-			} else {
-				err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
-			}
-			cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
-			cc.blockingpicker.updatePicker(base.NewErrPicker(err))
-			cc.csMgr.updateState(connectivity.TransientFailure)
+		if cc.sc == nil {
+			// Apply the failing LB only if we haven't received valid service config
+			// from the name resolver in the past.
+			cc.applyFailingLBLocked(s.ServiceConfig)
 			cc.mu.Unlock()
 			return ret
 		}
@@ -670,24 +774,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 	}
 
 	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+	if cc.sc != nil && cc.sc.lbConfig != nil {
 		balCfg = cc.sc.lbConfig.cfg
 	}
-
-	cbn := cc.curBalancerName
 	bw := cc.balancerWrapper
 	cc.mu.Unlock()
-	if cbn != grpclbName {
-		// Filter any grpclb addresses since we don't have the grpclb balancer.
-		for i := 0; i < len(s.Addresses); {
-			if s.Addresses[i].Type == resolver.GRPCLB {
-				copy(s.Addresses[i:], s.Addresses[i+1:])
-				s.Addresses = s.Addresses[:len(s.Addresses)-1]
-				continue
-			}
-			i++
-		}
-	}
+
 	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
 	if ret == nil {
 		ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -696,91 +788,74 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 	return ret
 }
 
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
-	if strings.EqualFold(cc.curBalancerName, name) {
-		return
-	}
-
-	channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
-	if cc.dopts.balancerBuilder != nil {
-		channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
-		return
-	}
-	if cc.balancerWrapper != nil {
-		// Don't hold cc.mu while closing the balancers. The balancers may call
-		// methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
-		// would cause a deadlock in that case.
-		cc.mu.Unlock()
-		cc.balancerWrapper.close()
-		cc.mu.Lock()
-	}
-
-	builder := balancer.Get(name)
-	if builder == nil {
-		channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
-		channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
-		builder = newPickfirstBuilder()
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
 	} else {
-		channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
 	}
-
-	cc.curBalancerName = builder.Name()
-	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return
+// Makes a copy of the input addresses slice and clears out the balancer
+// attributes field. Addresses are passed during subconn creation and address
+// update operations. In both cases, we will clear the balancer attributes by
+// calling this function, and therefore we will be able to use the Equal method
+// provided by the resolver.Address type for comparison.
+func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	for i := range in {
+		out[i] = in[i]
+		out[i].BalancerAttributes = nil
 	}
-	// TODO(bar switching) send updates to all balancer wrappers when balancer
-	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
-	cc.mu.Unlock()
+	return out
 }
 
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
 //
 // Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+	if cc.conns == nil {
+		return nil, ErrClientConnClosing
+	}
+
 	ac := &addrConn{
 		state:        connectivity.Idle,
 		cc:           cc,
-		addrs:        addrs,
+		addrs:        copyAddressesWithoutBalancerAttributes(addrs),
 		scopts:       opts,
 		dopts:        cc.dopts,
 		czData:       new(channelzData),
 		resetBackoff: make(chan struct{}),
+		stateChan:    make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
-	// Track ac in cc. This needs to be done before any getTransport(...) is called.
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return nil, ErrClientConnClosing
+
+	var err error
+	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+	if err != nil {
+		return nil, err
 	}
-	if channelz.IsOn() {
-		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
-		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Created",
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
 			Severity: channelz.CtInfo,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
-				Severity: channelz.CtInfo,
-			},
-		})
-	}
+		},
+	})
+
+	// Track ac in cc. This needs to be done before any getTransport(...) is called.
 	cc.conns[ac] = struct{}{}
-	cc.mu.Unlock()
 	return ac, nil
 }
@@ -810,7 +885,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
 
 // Target returns the target string of the ClientConn.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -837,67 +912,94 @@ func (cc *ClientConn) incrCallsFailed() {
 func (ac *addrConn) connect() error {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
+		if logger.V(2) {
+			logger.Infof("connect called on shutdown addrConn; ignoring.")
+		}
 		ac.mu.Unlock()
 		return errConnClosing
 	}
 	if ac.state != connectivity.Idle {
+		if logger.V(2) {
+			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+		}
 		ac.mu.Unlock()
 		return nil
 	}
-	// Update connectivity state within the lock to prevent subsequent or
-	// concurrent calls from resetting the transport more than once.
-	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
 	ac.resetTransport()
 	return nil
 }
 
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
-//
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
-// addresses will be picked up by retry in the next iteration after backoff.
-//
-// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
-//
-// If ac is Ready, it checks whether current connected address of ac is in the
-// new addrs list.
-//   - If true, it updates ac.addrs and returns true. The ac will keep using
-//     the existing connection.
-//   - If false, it does nothing and returns false.
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+func equalAddresses(a, b []resolver.Address) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if !v.Equal(b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	ac.mu.Lock()
-	defer ac.mu.Unlock()
-	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
+
+	addrs = copyAddressesWithoutBalancerAttributes(addrs)
+	if equalAddresses(ac.addrs, addrs) {
+		ac.mu.Unlock()
+		return
+	}
+
+	ac.addrs = addrs
+
 	if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle {
-		ac.addrs = addrs
-		return true
+		// We were not connecting, so do nothing but update the addresses.
+		ac.mu.Unlock()
+		return
 	}
 
-	if ac.state == connectivity.Connecting {
-		return false
+	if ac.state == connectivity.Ready {
+		// Try to find the connected address.
+		for _, a := range addrs {
+			a.ServerName = ac.cc.getServerName(a)
+			if a.Equal(ac.curAddr) {
+				// We are connected to a valid address, so do nothing but
+				// update the addresses.
+				ac.mu.Unlock()
+				return
+			}
+		}
 	}
 
-	// ac.state is Ready, try to find the connected address.
-	var curAddrFound bool
-	for _, a := range addrs {
-		a.ServerName = ac.cc.getServerName(a)
-		if reflect.DeepEqual(ac.curAddr, a) {
-			curAddrFound = true
-			break
-		}
+	// We are either connected to the wrong address or currently connecting.
+	// Stop the current iteration and restart.
+
+	ac.cancel()
+	ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+	// We have to defer here because GracefulClose => onClose, which requires
+	// locking ac.mu.
+	if ac.transport != nil {
+		defer ac.transport.GracefulClose()
+		ac.transport = nil
 	}
-	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
-	if curAddrFound {
-		ac.addrs = addrs
+
+	if len(addrs) == 0 {
+		ac.updateConnectivityState(connectivity.Idle, nil)
 	}
-	return curAddrFound
+	ac.mu.Unlock()
+
+	// Since we were connecting/connected, we should start a new connection
+	// attempt.
+	go ac.resetTransport()
 }
 
 // getServerName determines the serverName to be used in the connection
@@ -958,15 +1060,11 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
 	return cc.sc.healthCheckConfig
 }
 
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
+	return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
 		Ctx:            ctx,
 		FullMethodName: method,
 	})
-	if err != nil {
-		return nil, nil, toRPCErr(err)
-	}
-	return t, done, nil
 }
 
 func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
@@ -991,45 +1089,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
 
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var newBalancerName string
-		if cc.sc != nil && cc.sc.lbConfig != nil {
-			newBalancerName = cc.sc.lbConfig.name
-		} else {
-			var isGRPCLB bool
-			for _, a := range addrs {
-				if a.Type == resolver.GRPCLB {
-					isGRPCLB = true
-					break
-				}
-			}
-			if isGRPCLB {
-				newBalancerName = grpclbName
-			} else if cc.sc != nil && cc.sc.LB != nil {
-				newBalancerName = *cc.sc.LB
-			} else {
-				newBalancerName = PickFirstBalancerName
-			}
-		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+	var newBalancerName string
+	if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) {
+		// No service config or no LB policy specified in config.
+		newBalancerName = PickFirstBalancerName
+	} else if cc.sc.lbConfig != nil {
+		newBalancerName = cc.sc.lbConfig.name
+	} else { // cc.sc.LB != nil
+		newBalancerName = *cc.sc.LB
 	}
+	cc.balancerWrapper.switchTo(newBalancerName)
 }
 
 func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
 	cc.mu.RLock()
-	r := cc.resolverWrapper
+	cc.resolverWrapper.resolveNow(o)
 	cc.mu.RUnlock()
-	if r == nil {
-		return
-	}
-	go r.resolveNow(o)
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+	cc.resolverWrapper.resolveNow(o)
 }
 
 // ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -1041,7 +1120,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
 // However, if a previously unavailable network becomes available, this may be
 // used to trigger an immediate reconnect.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -1056,51 +1135,47 @@ func (cc *ClientConn) ResetConnectBackoff() {
 
 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
-	defer cc.cancel()
+	defer func() {
+		cc.cancel()
+		<-cc.csMgr.pubSub.Done()
+	}()
+
+	// Prevent calls to enter/exit idle immediately, and ensure we are not
+	// currently entering/exiting idle mode.
+	cc.idlenessMgr.Close()
 
 	cc.mu.Lock()
 	if cc.conns == nil {
 		cc.mu.Unlock()
 		return ErrClientConnClosing
 	}
+
 	conns := cc.conns
 	cc.conns = nil
 	cc.csMgr.updateState(connectivity.Shutdown)
 
-	rWrapper := cc.resolverWrapper
-	cc.resolverWrapper = nil
-	bWrapper := cc.balancerWrapper
-	cc.balancerWrapper = nil
+	// We can safely unlock and continue to access all fields now as
+	// cc.conns==nil, preventing any further operations on cc.
 	cc.mu.Unlock()
 
-	cc.blockingpicker.close()
+	cc.resolverWrapper.close()
+	// The order of closing matters here since the balancer wrapper assumes the
+	// picker is closed before it is closed.
+	cc.pickerWrapper.close()
+	cc.balancerWrapper.close()
 
-	if bWrapper != nil {
-		bWrapper.close()
-	}
-	if rWrapper != nil {
-		rWrapper.close()
-	}
+	<-cc.resolverWrapper.serializer.Done()
+	<-cc.balancerWrapper.serializer.Done()
 
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
 	}
-	if channelz.IsOn() {
-		ted := &channelz.TraceEventDesc{
-			Desc:     "Channel Deleted",
-			Severity: channelz.CtInfo,
-		}
-		if cc.dopts.channelzParentID != 0 {
-			ted.Parent = &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
-				Severity: channelz.CtInfo,
-			}
-		}
-		channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(cc.channelzID)
-	}
+	cc.addTraceEvent("deleted")
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from being
+	// deleted right away.
+	channelz.RemoveEntry(cc.channelzID)
+
 	return nil
 }
 
@@ -1111,7 +1186,7 @@ type addrConn struct {
 	cc     *ClientConn
 	dopts  dialOptions
-	acbw   balancer.SubConn
+	acbw   *acBalancerWrapper
 	scopts balancer.NewSubConnOptions
 
 	// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1125,12 +1200,13 @@ type addrConn struct {
 	addrs []resolver.Address // All addresses that the resolver resolved to.
 	// Use updateConnectivityState for updating addrConn's connectivity state.
-	state connectivity.State
+	state     connectivity.State
+	stateChan chan struct{} // closed and recreated on every state change.
 
 	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
 	resetBackoff chan struct{}
 
-	channelzID int64 // channelz unique identification number.
+	channelzID *channelz.Identifier
 	czData     *channelzData
 }
 
@@ -1139,9 +1215,16 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
 	if ac.state == s {
 		return
 	}
+	// When changing states, reset the state change channel.
+	close(ac.stateChan)
+	ac.stateChan = make(chan struct{})
 	ac.state = s
-	channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
-	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
+	if lastErr == nil {
+		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
+	} else {
+		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+	}
+	ac.acbw.updateState(s, lastErr)
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -1160,7 +1243,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 
 func (ac *addrConn) resetTransport() {
 	ac.mu.Lock()
-	if ac.state == connectivity.Shutdown {
+	acCtx := ac.ctx
+	if acCtx.Err() != nil {
 		ac.mu.Unlock()
 		return
 	}
@@ -1188,15 +1272,16 @@ func (ac *addrConn) resetTransport() {
 	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
-	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
-		// After exhausting all addresses, the addrConn enters
-		// TRANSIENT_FAILURE.
 		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
+		if acCtx.Err() != nil {
+			// addrConn was torn down.
 			ac.mu.Unlock()
 			return
 		}
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
 		ac.updateConnectivityState(connectivity.TransientFailure, err)
 
 		// Backoff.
@@ -1211,13 +1296,13 @@ func (ac *addrConn) resetTransport() {
 			ac.mu.Unlock()
 		case <-b:
 			timer.Stop()
-		case <-ac.ctx.Done():
+		case <-acCtx.Done():
 			timer.Stop()
 			return
 		}
 
 		ac.mu.Lock()
-		if ac.state != connectivity.Shutdown {
+		if acCtx.Err() == nil {
 			ac.updateConnectivityState(connectivity.Idle, err)
 		}
 		ac.mu.Unlock()
@@ -1232,14 +1317,13 @@ func (ac *addrConn) resetTransport() {
 // tryAllAddrs tries to create a connection to the addresses, and stops at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
 	var firstConnErr error
 	for _, addr := range addrs {
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
+		if ctx.Err() != nil {
 			return errConnClosing
 		}
+		ac.mu.Lock()
 
 		ac.cc.mu.RLock()
 		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@@ -1253,7 +1337,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 
 		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
 
-		err := ac.createTransport(addr, copts, connectDeadline)
+		err := ac.createTransport(ctx, addr, copts, connectDeadline)
 		if err == nil {
 			return nil
 		}
@@ -1270,113 +1354,84 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 // createTransport creates a connection to addr. It returns an error if the
 // address was not successfully connected, or updates ac appropriately with the
 // new transport.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
-	// TODO: Delete prefaceReceived and move the logic to wait for it into the
-	// transport.
-	prefaceReceived := grpcsync.NewEvent()
-	connClosed := grpcsync.NewEvent()
-
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
 	addr.ServerName = ac.cc.getServerName(addr)
-	hctx, hcancel := context.WithCancel(ac.ctx)
-	hcStarted := false // protected by ac.mu
+	hctx, hcancel := context.WithCancel(ctx)
 
-	onClose := func() {
+	onClose := func(r transport.GoAwayReason) {
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
-		defer connClosed.Fire()
-		if !hcStarted || hctx.Err() != nil {
-			// We didn't start the health check or set the state to READY, so
-			// no need to do anything else here.
-			//
-			// OR, we have already cancelled the health check context, meaning
-			// we have already called onClose once for this transport. In this
-			// case it would be dangerous to clear the transport and update the
-			// state, since there may be a new transport in this addrConn.
+		// adjust params based on GoAwayReason
+		ac.adjustParams(r)
+		if ctx.Err() != nil {
+			// Already shut down or connection attempt canceled. tearDown() or
+			// updateAddrs() already cleared the transport and canceled hctx
+			// via ac.ctx, and we expected this connection to be closed, so do
+			// nothing here.
 			return
 		}
 		hcancel()
+		if ac.transport == nil {
+			// We're still connecting to this address, which could error. Do
+			// not update the connectivity state or resolve; these will happen
+			// at the end of the tryAllAddrs connection loop in the event of an
+			// error.
+			return
+		}
 		ac.transport = nil
-		// Refresh the name resolver
+		// Refresh the name resolver on any connection loss.
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
-		if ac.state != connectivity.Shutdown {
-			ac.updateConnectivityState(connectivity.Idle, nil)
-		}
+		// Always go idle and wait for the LB policy to initiate a new
+		// connection attempt.
+		ac.updateConnectivityState(connectivity.Idle, nil)
 	}
 
-	onGoAway := func(r transport.GoAwayReason) {
-		ac.mu.Lock()
-		ac.adjustParams(r)
-		ac.mu.Unlock()
-		onClose()
-	}
-
-	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
 	defer cancel()
-	if channelz.IsOn() {
-		copts.ChannelzParentID = ac.channelzID
-	}
+	copts.ChannelzParentID = ac.channelzID
 
-	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
 	if err != nil {
+		if logger.V(2) {
+			logger.Infof("Creating new client transport to %q: %v", addr, err)
+		}
 		// newTr is either nil, or closed.
-		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
+		hcancel()
+		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
 		return err
 	}
 
-	select {
-	case <-connectCtx.Done():
-		// We didn't get the preface in time.
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ctx.Err() != nil {
+		// This can happen if the subConn was removed while in `Connecting`
+		// state. tearDown() would have set the state to `Shutdown`, but
+		// would not have closed the transport since ac.transport would not
+		// have been set at that point.
+		//
+		// We run this in a goroutine because newTr.Close() calls onClose()
+		// inline, which requires locking ac.mu.
+		//
 		// The error we pass to Close() is immaterial since there are no open
 		// streams at this point, so no trailers with error details will be sent
 		// out. We just need to pass a non-nil error.
-		newTr.Close(transport.ErrConnClosing)
-		if connectCtx.Err() == context.DeadlineExceeded {
-			err := errors.New("failed to receive server preface within timeout")
-			channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
-			return err
-		}
+		//
+		// This can also happen when updateAddrs is called during a connection
+		// attempt.
+		go newTr.Close(transport.ErrConnClosing)
 		return nil
-	case <-prefaceReceived.Done():
-		// We got the preface - huzzah! things are good.
-		ac.mu.Lock()
-		defer ac.mu.Unlock()
-		if connClosed.HasFired() {
-			// onClose called first; go idle but do nothing else.
-			if ac.state != connectivity.Shutdown {
-				ac.updateConnectivityState(connectivity.Idle, nil)
-			}
-			return nil
-		}
-		if ac.state == connectivity.Shutdown {
-			// This can happen if the subConn was removed while in `Connecting`
-			// state. tearDown() would have set the state to `Shutdown`, but
-			// would not have closed the transport since ac.transport would not
-			// been set at that point.
-			//
-			// We run this in a goroutine because newTr.Close() calls onClose()
-			// inline, which requires locking ac.mu.
-			//
-			// The error we pass to Close() is immaterial since there are no open
-			// streams at this point, so no trailers with error details will be sent
-			// out. We just need to pass a non-nil error.
-			go newTr.Close(transport.ErrConnClosing)
-			return nil
-		}
-		ac.curAddr = addr
-		ac.transport = newTr
-		hcStarted = true
-		ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+	}
+	if hctx.Err() != nil {
+		// onClose was already called for this connection, but the connection
+		// was successfully established first. Consider it a success and set
+		// the new state to Idle.
+		ac.updateConnectivityState(connectivity.Idle, nil)
 		return nil
-	case <-connClosed.Done():
-		// The transport has already closed. If we received the preface, too,
-		// this is not an error.
-		select {
-		case <-prefaceReceived.Done():
-			return nil
-		default:
-			return errors.New("connection closed before server preface received")
-		}
 	}
+	ac.curAddr = addr
+	ac.transport = newTr
+	ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+	return nil
 }
 
 // startHealthCheck starts the health checking stream (RPC) to watch the health
@@ -1422,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 	// Set up the health check helper functions.
 	currentTr := ac.transport
-	newStream := func(method string) (interface{}, error) {
+	newStream := func(method string) (any, error) {
 		ac.mu.Lock()
 		if ac.transport != currentTr {
 			ac.mu.Unlock()
@@ -1446,7 +1501,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 		if status.Code(err) == codes.Unimplemented {
 			channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
 		} else {
-			channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
+			channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
 		}
 	}()
@@ -1470,6 +1525,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 	return nil
 }
 
+// getTransport waits until the addrConn is ready and returns the transport.
+// If the context expires first, returns an appropriate status. If the
+// addrConn is stopped first, returns an Unavailable status error.
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
+	for ctx.Err() == nil {
+		ac.mu.Lock()
+		t, state, sc := ac.transport, ac.state, ac.stateChan
+		ac.mu.Unlock()
+		if state == connectivity.Ready {
+			return t, nil
+		}
+		if state == connectivity.Shutdown {
+			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
+		}
+
+		select {
+		case <-ctx.Done():
+		case <-sc:
+		}
+	}
+	return nil, status.FromContextError(ctx.Err()).Err()
+}
+
 // tearDown starts to tear down the addrConn.
 //
 // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1487,30 +1565,43 @@ func (ac *addrConn) tearDown(err error) {
 	ac.updateConnectivityState(connectivity.Shutdown, nil)
 	ac.cancel()
 	ac.curAddr = resolver.Address{}
-	if err == errConnDrain && curTr != nil {
-		// GracefulClose(...) may be executed multiple times when
-		// i) receiving multiple GoAway frames from the server; or
-		// ii) there are concurrent name resolver/Balancer triggered
-		// address removal and GoAway.
-		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
-		ac.mu.Unlock()
-		curTr.GracefulClose()
-		ac.mu.Lock()
-	}
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Deleted",
+
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel deleted",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
 			Severity: channelz.CtInfo,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
-				Severity: channelz.CtInfo,
-			},
-		})
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(ac.channelzID)
-	}
+		},
+	})
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from
+	// being deleted right away.
+	channelz.RemoveEntry(ac.channelzID)
 	ac.mu.Unlock()
+
+	// We have to release the lock before the call to GracefulClose/Close here
+	// because both of them call onClose(), which requires locking ac.mu.
+	if curTr != nil {
+		if err == errConnDrain {
+			// Close the transport gracefully when the subConn is being shutdown.
+			//
+			// GracefulClose() may be executed multiple times if:
+			//   - multiple GoAway frames are received from the server
+			//   - there are concurrent name resolver or balancer triggered
+			//     address removal and GoAway
+			curTr.GracefulClose()
+		} else {
+			// Hard close the transport when the channel is entering idle or is
+			// being shutdown. In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancelation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
+	}
 }
 
 func (ac *addrConn) getState() connectivity.State {
@@ -1598,6 +1689,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
 // referenced by users.
 var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
 
+// getResolver finds the scheme in the cc's resolvers or the global registry.
+// scheme should always be lowercase (typically by virtue of url.Parse()
+// performing proper RFC3986 behavior).
 func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
 	for _, rb := range cc.dopts.resolvers {
 		if scheme == rb.Scheme() {
@@ -1619,7 +1713,14 @@ func (cc *ClientConn) connectionError() error {
 	return cc.lastConnectionError
 }
 
-func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
+// parseTargetAndFindResolver parses the user's dial target and stores the
+// parsed target in `cc.parsedTarget`.
+//
+// The resolver to use is determined based on the scheme in the parsed target
+// and the same is stored in `cc.resolverBuilder`.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) parseTargetAndFindResolver() error {
 	channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
 
 	var rb resolver.Builder
@@ -1627,11 +1728,12 @@
 	if err != nil {
 		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
 	} else {
-		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
-		rb = cc.getResolver(parsedTarget.Scheme)
+		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget)
+		rb = cc.getResolver(parsedTarget.URL.Scheme)
 		if rb != nil {
 			cc.parsedTarget = parsedTarget
-			return rb, nil
+			cc.resolverBuilder = rb
+			return nil
 		}
 	}
@@ -1646,51 +1748,98 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error)
 	parsedTarget, err = parseTarget(canonicalTarget)
 	if err != nil {
 		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
-		return nil, err
+		return err
 	}
 	channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
-	rb = cc.getResolver(parsedTarget.Scheme)
+	rb = cc.getResolver(parsedTarget.URL.Scheme)
 	if rb == nil {
-		return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
+		return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
 	}
 	cc.parsedTarget = parsedTarget
-	return rb, nil
+	cc.resolverBuilder = rb
+	return nil
 }
 
 // parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and endpoint. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}
-	// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
-	// value returned from url.Parse() contains a leading "/". Although this is
-	// in accordance with RFC 3986, we do not want to break existing resolver
-	// implementations which expect the endpoint without the leading "/". So, we
-	// end up stripping the leading "/" here. But this will result in an
-	// incorrect parsing for something like "unix:///path/to/socket". Since we
-	// own the "unix" resolver, we can workaround in the unix resolver by using
-	// the `URL` field instead of the `Endpoint` field.
-	endpoint := u.Path
-	if endpoint == "" {
-		endpoint = u.Opaque
-	}
-	endpoint = strings.TrimPrefix(endpoint, "/")
-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		Endpoint:  endpoint,
-		URL:       *u,
-	}, nil
+
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return true for characters that must be escaped. Valid chars are
+	// listed here:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
 }
 
 // Determine channel authority. The order of precedence is as follows:
 // - user specified authority override using `WithAuthority` dial option
 // - creds' notion of server name for the authentication handshake
 // - endpoint from dial target of the form "scheme://[authority]/endpoint"
-func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) determineAuthority() error {
+	dopts := cc.dopts
 	// Historically, we had two options for users to specify the serverName or
 	// authority for a channel. One was through the transport credentials
 	// (either in its constructor, or through the OverrideServerName() method).
@@ -1707,25 +1856,21 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err
 	}
 	authorityFromDialOption := dopts.authority
 	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
-		return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
-	}
-
-	switch {
-	case authorityFromDialOption != "":
-		return authorityFromDialOption, nil
-	case authorityFromCreds != "":
-		return authorityFromCreds, nil
-	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
-		// TODO: remove when the unix resolver implements optional interface to
-		// return channel authority.
-		return "localhost", nil
-	case strings.HasPrefix(endpoint, ":"):
-		return "localhost" + endpoint, nil
-	default:
-		// TODO: Define an optional interface on the resolver builder to return
-		// the channel authority given the user's dial target. For resolvers
-		// which don't implement this interface, we will use the endpoint from
-		// "scheme://authority/endpoint" as the default authority.
-		return endpoint, nil
+		return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	endpoint := cc.parsedTarget.Endpoint()
+	if authorityFromDialOption != "" {
+		cc.authority = authorityFromDialOption
+	} else if authorityFromCreds != "" {
+		cc.authority = authorityFromCreds
+	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+	} else if strings.HasPrefix(endpoint, ":") {
+		cc.authority = "localhost" + endpoint
+	} else {
+		cc.authority = encodeAuthority(endpoint)
 	}
+	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
+	return nil
 }
diff --git a/event-exporter/vendor/google.golang.org/grpc/codec.go b/event-exporter/vendor/google.golang.org/grpc/codec.go
index 129776547..411e3dfd4 100644
--- a/event-exporter/vendor/google.golang.org/grpc/codec.go
+++ b/event-exporter/vendor/google.golang.org/grpc/codec.go
@@ -27,8 +27,8 @@ import (
 // omits the name/string, which vary between the two and are not needed for
 // anything besides the registry in the encoding package.
 type baseCodec interface {
-	Marshal(v interface{}) ([]byte, error)
-	Unmarshal(data []byte, v interface{}) error
+	Marshal(v any) ([]byte, error)
+	Unmarshal(data []byte, v any) error
 }
 
 var _ baseCodec = Codec(nil)
@@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil)
 // Deprecated: use encoding.Codec instead.
 type Codec interface {
 	// Marshal returns the wire format of v.
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	// Unmarshal parses the wire format into v.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 	// String returns the name of the Codec implementation. This is unused by
 	// gRPC.
 	String() string
 }
diff --git a/event-exporter/vendor/google.golang.org/grpc/codes/code_string.go b/event-exporter/vendor/google.golang.org/grpc/codes/code_string.go
index 0b206a578..934fac2b0 100644
--- a/event-exporter/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/event-exporter/vendor/google.golang.org/grpc/codes/code_string.go
@@ -18,7 +18,15 @@
 
 package codes
 
-import "strconv"
+import (
+	"strconv"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.CanonicalString = canonicalString
+}
 
 func (c Code) String() string {
 	switch c {
@@ -60,3 +68,44 @@ func (c Code) String() string {
 		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
 	}
 }
+
+func canonicalString(c Code) string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "CANCELLED"
+	case Unknown:
+		return "UNKNOWN"
+	case InvalidArgument:
+		return "INVALID_ARGUMENT"
+	case DeadlineExceeded:
+		return "DEADLINE_EXCEEDED"
+	case NotFound:
+		return "NOT_FOUND"
+	case AlreadyExists:
+		return "ALREADY_EXISTS"
+	case PermissionDenied:
+		return "PERMISSION_DENIED"
+	case ResourceExhausted:
+		return "RESOURCE_EXHAUSTED"
+	case FailedPrecondition:
+		return "FAILED_PRECONDITION"
+	case Aborted:
+		return "ABORTED"
+	case OutOfRange:
+		return "OUT_OF_RANGE"
+	case Unimplemented:
+		return "UNIMPLEMENTED"
+	case Internal:
+		return "INTERNAL"
+	case Unavailable:
+		return "UNAVAILABLE"
+	case DataLoss:
+		return "DATA_LOSS"
+	case Unauthenticated:
+		return "UNAUTHENTICATED"
+	default:
+		return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
diff --git a/event-exporter/vendor/google.golang.org/grpc/codes/codes.go b/event-exporter/vendor/google.golang.org/grpc/codes/codes.go
index 11b106182..08476ad1f 100644
--- a/event-exporter/vendor/google.golang.org/grpc/codes/codes.go
+++ b/event-exporter/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,7 +25,13 @@ import (
 	"strconv"
 )
 
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values. Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
 type Code uint32
 
 const (
diff --git a/event-exporter/vendor/google.golang.org/grpc/credentials/credentials.go b/event-exporter/vendor/google.golang.org/grpc/credentials/credentials.go
index 96ff1877e..5feac3aa0 100644
--- a/event-exporter/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/event-exporter/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -36,16 +36,16 @@ import (
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
 type PerRPCCredentials interface {
-	// GetRequestMetadata gets the current request metadata, refreshing
-	// tokens if required. This should be called by the transport layer on
-	// each request, and the data should be populated in headers or other
-	// context. If a status code is returned, it will be used as the status
-	// for the RPC. uri is the URI of the entry point for the request.
-	// When supported by the underlying implementation, ctx can be used for
-	// timeout and cancellation. Additionally, RequestInfo data will be
-	// available via ctx to this call.
-	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
-	// it as an arbitrary string.
+	// GetRequestMetadata gets the current request metadata, refreshing tokens
+	// if required. This should be called by the transport layer on each
+	// request, and the data should be populated in headers or other
+	// context. If a status code is returned, it will be used as the status for
+	// the RPC (restricted to an allowable set of codes as defined by gRFC
+	// A54). uri is the URI of the entry point for the request. When supported
+	// by the underlying implementation, ctx can be used for timeout and
+	// cancellation. Additionally, RequestInfo data will be available via ctx
+	// to this call. TODO(zhaoq): Define the set of the qualified keys instead
+	// of leaving it as an arbitrary string.
 	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
 	// RequireTransportSecurity indicates whether the credentials requires
 	// transport security.
diff --git a/event-exporter/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/event-exporter/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 4fbed1256..82bee1443 100644
--- a/event-exporter/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/event-exporter/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -70,3 +70,29 @@ type info struct {
 func (info) AuthType() string {
 	return "insecure"
 }
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per rpc credential.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation as the insecure
+// bundle does not support a per-RPC credential.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
diff --git a/event-exporter/vendor/google.golang.org/grpc/credentials/tls.go b/event-exporter/vendor/google.golang.org/grpc/credentials/tls.go
index 784822d05..5dafd34ed 100644
--- a/event-exporter/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/event-exporter/vendor/google.golang.org/grpc/credentials/tls.go
@@ -23,9 +23,9 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/url"
+	"os"
 
 	credinternal "google.golang.org/grpc/internal/credentials"
 )
@@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string {
 	return "tls"
 }
 
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+	for _, s := range tls.CipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	for _, s := range tls.InsecureCipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
 // GetSecurityValue returns security info requested by channelz.
 func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
 	v := &TLSChannelzSecurityValue{
-		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+		StandardName: cipherSuiteLookup(t.State.CipherSuite),
 	}
 	// Currently there's no way to get LocalCertificate info from tls package.
 	if len(t.State.PeerCertificates) > 0 {
@@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
 	return nil
 }
 
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      {},
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   {},
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:   {},
+}
+
 // NewTLS uses c to construct a TransportCredentials based on TLS.
 func NewTLS(c *tls.Config) TransportCredentials {
 	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
 	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+	// If the user did not configure a MinVersion and did not configure a
+	// MaxVersion < 1.2, use MinVersion=1.2, which is required by
+	// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+	if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
+		tc.config.MinVersion = tls.VersionTLS12
+	}
+	// If the user did not configure CipherSuites, use all "secure" cipher
+	// suites reported by the TLS package, but remove some explicitly forbidden
+	// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+	if tc.config.CipherSuites == nil {
+		for _, cs := range tls.CipherSuites() {
+			if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+				tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+			}
+		}
+	}
 	return tc
 }
 
@@ -166,7 +210,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
 // it will override the virtual host name of authority (e.g. :authority header
 // field) in requests.
 func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
-	b, err := ioutil.ReadFile(certFile)
+	b, err := os.ReadFile(certFile)
 	if err != nil {
 		return nil, err
 	}
@@ -195,7 +239,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
 // TLSChannelzSecurityValue defines the struct that TLS protocol should return
 // from GetSecurityValue(), containing security info like cipher and certificate used.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This type is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/event-exporter/vendor/google.golang.org/grpc/dialoptions.go b/event-exporter/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e..ba2426180 100644 --- a/event-exporter/vendor/google.golang.org/grpc/dialoptions.go +++ b/event-exporter/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,35 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
 type dialOptions struct {
@@ -45,19 +58,17 @@ type dialOptions struct {
 	chainUnaryInts  []UnaryClientInterceptor
 	chainStreamInts []StreamClientInterceptor
 
-	cp          Compressor
-	dc          Decompressor
-	bs          internalbackoff.Strategy
-	block       bool
-	returnLastError bool
-	timeout     time.Duration
-	scChan      <-chan ServiceConfig
-	authority   string
-	copts       transport.ConnectOptions
-	callOptions []CallOption
-	// This is used by WithBalancerName dial option.
-	balancerBuilder  balancer.Builder
-	channelzParentID int64
+	cp               Compressor
+	dc               Decompressor
+	bs               internalbackoff.Strategy
+	block            bool
+	returnLastError  bool
+	timeout          time.Duration
+	authority        string
+	binaryLogger     binarylog.Logger
+	copts            transport.ConnectOptions
+	callOptions      []CallOption
+	channelzParentID *channelz.Identifier
 	disableServiceConfig bool
 	disableRetry         bool
 	disableHealthCheck   bool
@@ -66,6 +77,8 @@ type dialOptions struct {
 	defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
 	resolvers                   []resolver.Builder
+	idleTimeout                 time.Duration
+	recvBufferPool              SharedBufferPool
 }
 
 // DialOption configures how we set up the connection.
@@ -73,10 +86,12 @@ type DialOption interface {
 	apply(*dialOptions)
 }
 
+var globalDialOptions []DialOption
+
 // EmptyDialOption does not alter the dial configuration. It can be embedded in
 // another structure to build custom dial options.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This type is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -84,6 +99,16 @@ type EmptyDialOption struct{}
 
 func (EmptyDialOption) apply(*dialOptions) {}
 
+type disableGlobalDialOptions struct{}
+
+func (disableGlobalDialOptions) apply(*dialOptions) {}
+
+// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
+// from applying the global DialOptions (set via AddGlobalDialOptions).
+func newDisableGlobalDialOptions() DialOption {
+	return &disableGlobalDialOptions{}
+}
+
 // funcDialOption wraps a function that modifies dialOptions into an
 // implementation of the DialOption interface.
 type funcDialOption struct {
@@ -100,13 +125,42 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
 	}
 }
 
+type joinDialOption struct {
+	opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+	for _, opt := range jdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+	return &joinDialOption{opts: opts}
+}
+
+// WithSharedWriteBuffer allows reusing the per-connection transport write buffer.
+// If this option is set to true, every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.SharedWriteBuffer = val
+	})
+}
+
 // WithWriteBufferSize determines how much data can be batched before doing a
 // write on the wire. The corresponding memory allocation for this buffer will
 // be twice the size to keep syscalls low. The default value for this buffer is
 // 32KB.
 //
-// Zero will disable the write buffer such that each write will be on underlying
-// connection. Note: A Send call may not directly translate to a write.
+// Zero or negative values will disable the write buffer such that each write
+// will be on the underlying connection. Note: A Send call may not directly
+// translate to a write.
 func WithWriteBufferSize(s int) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.WriteBufferSize = s
@@ -116,8 +170,9 @@ func WithWriteBufferSize(s int) DialOption {
 // WithReadBufferSize lets you set the size of the read buffer; this determines how
 // much data can be read at most for each read syscall.
 //
-// The default value for this buffer is 32KB. Zero will disable read buffer for
-// a connection so data framer can access the underlying conn directly.
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
 func WithReadBufferSize(s int) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.ReadBufferSize = s
@@ -195,38 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption {
 	})
 }
 
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead. Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = builder
-	})
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver or via
-// WithDefaultServiceConfig, as specified at
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
-// removed in a future 1.x release.
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.scChan = c
-	})
-}
-
 // WithConnectParams configures the ClientConn to use the provided ConnectParams
 // for creating and maintaining connections to servers.
 //
@@ -275,6 +298,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
 // WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting to the server happens in the background.
+//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
 func WithBlock() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.block = true
@@ -286,7 +312,10 @@ func WithBlock() DialOption {
 // the context.DeadlineExceeded error.
 // Implies WithBlock()
 //
-// Experimental
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -304,8 +333,8 @@ func WithReturnConnectionError() DialOption {
 // WithCredentialsBundle or WithPerRPCCredentials) which require transport
 // security is incompatible and will cause grpc.Dial() to fail.
 //
-// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
-// Will be supported throughout 1.x.
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -315,7 +344,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -346,7 +375,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -371,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -402,7 +442,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -414,7 +468,10 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -428,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. 
 func WithUserAgent(s string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.copts.UserAgent = s
+		o.copts.UserAgent = s + " " + grpcUA
 	})
 }
 
@@ -494,11 +551,11 @@ func WithAuthority(a string) DialOption {
 // current ClientConn's parent. This function is used in nested channel creation
 // (e.g. grpclb dial).
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
-func WithChannelzParentID(id int64) DialOption {
+func WithChannelzParentID(id *channelz.Identifier) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.channelzParentID = id
 	})
@@ -539,9 +596,6 @@ func WithDefaultServiceConfig(s string) DialOption {
 // service config enables them. This does not impact transparent retries, which
 // will happen automatically if no data is written to the wire or if the RPC is
 // unprocessed by the remote server.
-//
-// Retry support is currently enabled by default, but may be disabled by
-// setting the environment variable "GRPC_GO_RETRY" to "off".
 func WithDisableRetry() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableRetry = true
@@ -559,7 +613,7 @@ func WithMaxHeaderListSize(s uint32) DialOption {
 // WithDisableHealthCheck disables the LB channel health checking for all
 // SubConns of this ClientConn.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -581,12 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
 
 func defaultDialOptions() dialOptions {
 	return dialOptions{
-		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
-			WriteBufferSize: defaultWriteBufSize,
 			ReadBufferSize:  defaultReadBufSize,
+			WriteBufferSize: defaultWriteBufSize,
 			UseProxy:        true,
+			UserAgent:       grpcUA,
 		},
+		bs:              internalbackoff.DefaultExponential,
+		healthCheckFunc: internal.HealthCheckFunc,
+		idleTimeout:     30 * time.Minute,
+		recvBufferPool:  nopBufferPool{},
 	}
 }
 
@@ -606,7 +664,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
 // resolver.Register. They will be matched against the scheme used for the
 // current Dial only, and will take precedence over the global registry.
 //
-// Experimental
+// # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
@@ -615,3 +673,46 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
 		o.resolvers = append(o.resolvers, rs...)
 	})
 }
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e., there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and, as a result, the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time, and idleness can be disabled by passing a timeout of zero.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.idleTimeout = d
+	})
+}
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages.
Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/event-exporter/vendor/google.golang.org/grpc/encoding/encoding.go b/event-exporter/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7..5ebf88d71 100644 --- a/event-exporter/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/event-exporter/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -36,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -49,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -73,6 +70,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -85,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. 
- Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/event-exporter/vendor/google.golang.org/grpc/encoding/proto/proto.go b/event-exporter/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35af..0ee3d3bae 100644 --- a/event-exporter/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/event-exporter/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/event-exporter/vendor/google.golang.org/grpc/grpclog/component.go b/event-exporter/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2..ac73c9ced 100644 --- a/event-exporter/vendor/google.golang.org/grpc/grpclog/component.go +++ b/event-exporter/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) 
} -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/event-exporter/vendor/google.golang.org/grpc/grpclog/grpclog.go b/event-exporter/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34..16928c9cb 100644 --- a/event-exporter/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/event-exporter/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. 
-func Errorln(args ...interface{}) {
+func Errorln(args ...any) {
 	grpclog.Logger.Errorln(args...)
 }
 
 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 	grpclog.Logger.Fatal(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {
 
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
 // It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 	grpclog.Logger.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {
 
 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
 // It calls os.Exit() with exit code 1.
-func Fatalln(args ...interface{}) {
+func Fatalln(args ...any) {
 	grpclog.Logger.Fatalln(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 //
 // Deprecated: use Info.
-func Print(args ...interface{}) {
+func Print(args ...any) {
 	grpclog.Logger.Info(args...)
 }
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
+func Printf(format string, args ...any) {
 	grpclog.Logger.Infof(format, args...)
 }
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
-func Println(args ...interface{}) {
+func Println(args ...any) {
 	grpclog.Logger.Infoln(args...)
 }
diff --git a/event-exporter/vendor/google.golang.org/grpc/grpclog/logger.go b/event-exporter/vendor/google.golang.org/grpc/grpclog/logger.go
index ef06a4822..b1674d826 100644
--- a/event-exporter/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/event-exporter/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
 //
 // Deprecated: use LoggerV2.
 type Logger interface {
-	Fatal(args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Fatalln(args ...interface{})
-	Print(args ...interface{})
-	Printf(format string, args ...interface{})
-	Println(args ...interface{})
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
 }
 
 // SetLogger sets the logger that is used in grpc. Call only from
@@ -45,39 +45,39 @@ type loggerWrapper struct {
 	Logger
 }
 
-func (g *loggerWrapper) Info(args ...interface{}) {
+func (g *loggerWrapper) Info(args ...any) {
 	g.Logger.Print(args...)
 }
 
-func (g *loggerWrapper) Infoln(args ...interface{}) {
+func (g *loggerWrapper) Infoln(args ...any) {
 	g.Logger.Println(args...)
 }
 
-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+func (g *loggerWrapper) Infof(format string, args ...any) {
 	g.Logger.Printf(format, args...)
 }
 
-func (g *loggerWrapper) Warning(args ...interface{}) {
+func (g *loggerWrapper) Warning(args ...any) {
 	g.Logger.Print(args...)
 }
 
-func (g *loggerWrapper) Warningln(args ...interface{}) {
+func (g *loggerWrapper) Warningln(args ...any) {
 	g.Logger.Println(args...)
} -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/event-exporter/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/event-exporter/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f66409..ecfd36d71 100644 --- a/event-exporter/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/event-exporter/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -34,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. 
func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -183,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -242,18 +241,18 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/event-exporter/vendor/google.golang.org/grpc/interceptor.go b/event-exporter/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf..877d78fc3 100644 --- a/event-exporter/vendor/google.golang.org/grpc/interceptor.go +++ b/event-exporter/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,22 +66,25 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. 
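Because the interceptor signatures above now use any, a server-side unary interceptor written against this version looks like the sketch below. The interceptor name and log format are hypothetical; the status handling follows the UnaryHandler contract quoted above (errors should come from the status package, or gRPC falls back to codes.Unknown):

	package main

	import (
		"context"
		"log"

		"google.golang.org/grpc"
		"google.golang.org/grpc/status"
	)

	// loggingInterceptor matches the UnaryServerInterceptor signature and logs
	// failed RPCs. status.Convert mirrors what gRPC does with a non-status
	// error: it becomes codes.Unknown with err.Error() as the message.
	func loggingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		resp, err := handler(ctx, req)
		if err != nil {
			st := status.Convert(err)
			log.Printf("%s failed: code=%v msg=%q", info.FullMethod, st.Code(), st.Message())
		}
		return resp, err
	}

	func main() {
		// Installed at server construction time; service registration and
		// Serve are elided from this sketch.
		s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
		_ = s
	}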
@@ -98,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/event-exporter/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..fed1c011a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/event-exporter/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..3c594e6e4 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,385 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. 
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. 
+		gsb.mu.Lock()
+		if gsb.balancerPending != nil {
+			gsb.balancerPending = nil
+		} else {
+			gsb.balancerCurrent = nil
+		}
+		gsb.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	// This write doesn't need to take gsb.mu because this field never gets read
+	// or written to on any calls from the current or pending. Calls from grpc
+	// to this balancer are guaranteed to be called synchronously, so this
+	// bw.Balancer field will never be forwarded to until this SwitchTo()
+	// function returns.
+	bw.Balancer = newBalancer
+	return nil
+}
+
+// Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	if gsb.balancerPending != nil {
+		return gsb.balancerPending
+	}
+	return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return errBalancerClosed
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// reconnected manually.
+func (gsb *Balancer) ExitIdle() {
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
+		ei.ExitIdle()
+		return
+	}
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	for sc := range balToUpdate.subconns {
+		sc.Connect()
+	}
+}
+
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
+	gsb.currentMu.Lock()
+	defer gsb.currentMu.Unlock()
+	gsb.mu.Lock()
+	// Forward update to the appropriate child. Even if there is a pending
+	// balancer, the current balancer should continue to get SubConn updates to
+	// maintain the proper state while the pending is still connecting.
+	var balToUpdate *balancerWrapper
+	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+		balToUpdate = gsb.balancerCurrent
+	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+		balToUpdate = gsb.balancerPending
+	}
+	if balToUpdate == nil {
+		// SubConn belonged to a stale lb policy that has not yet fully closed,
+		// or the balancer was already closed.
+ gsb.mu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/shutdown +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + sc.Shutdown() + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. 
+ // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + sc.Shutdown() + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/balancerload/load.go b/event-exporter/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d966..94a08d687 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). 
-func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..755fdebc1 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,38 +28,48 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +78,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,83 +110,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". 
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
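+//
+// Rule precedence mirrors the lookup below: an exact method rule wins over
+// the blacklist, which wins over a service rule, which wins over the global
+// "*" rule. For example, with config "Foo/*,-Foo/Bar", method "/Foo/Baz"
+// gets a logger from the service rule while "/Foo/Bar" gets none.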
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..f9e80e27a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 		if err != nil {
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
-		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 		return nil
@@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
 	}
 	if m == "*" {
-		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	} else {
-		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	}
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 0cdb41831..0f31274a3 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -19,6 +19,7 @@
 package binarylog
 
 import (
+	"context"
 	"net"
 	"strings"
 	"sync/atomic"
@@ -26,7 +27,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 
-	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 )
@@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() {
 var idGen callIDGenerator
 
 // MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type MethodLogger interface {
+	Log(context.Context, LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
 	headerMaxLen, messageMaxLen uint64
 
 	callID          uint64
@@ -57,8 +67,12 @@ type MethodLogger struct {
 	sink Sink // TODO(blog): make this plugable.
 }
 
-func newMethodLogger(h, m uint64) *MethodLogger {
-	return &MethodLogger{
+// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
 		headerMaxLen:  h,
 		messageMaxLen: m,
@@ -69,8 +83,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
 	}
 }
 
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much logic
+// in TruncatingMethodLogger as possible.
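+// For example, a custom sink implementation can call Build to obtain the
+// truncated *binlogpb.GrpcLogEntry for an event and export it itself rather
+// than going through Log and the configured sink.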
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -146,10 +169,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -182,19 +205,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -207,10 +230,10 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -247,10 +270,10 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -287,15 +310,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -311,7 +334,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -354,15 +377,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func 
mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b3..264de387c 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/event-exporter/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c120..11f91668a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. 
This is typically used for passing updates from one entity @@ -28,42 +31,50 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any + closed bool + closing bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return errBufferClosed + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() - return + return nil default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -71,8 +82,9 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +92,25 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -func (b *Unbounded) Get() <-chan interface{} { +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { return b.c } + +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. 
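+//
+// A typical consumer loop under this contract looks like:
+//
+//	for v := range b.Get() {
+//		process(v) // process is a placeholder for the consumer's handler
+//		b.Load()   // release the next buffered value, if any
+//	}
+//
+// The range loop terminates once Close() has been called and the backlog has
+// been fully drained.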
+func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return + } + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543..fc094f344 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,13 +24,14 @@ package channelz import ( - "fmt" + "errors" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -38,8 +39,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -49,14 +53,21 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -94,49 +105,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. -// -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) - idGen.reset() - return func() error { - var err error - cm := db.get() - if cm == nil { - return nil - } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
- return nil - } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) - } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -188,54 +156,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { - id := idGen.genID() +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { + id := IDGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
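+//
+// The returned Identifier records its parent, so its String() form includes
+// the full path, e.g. "Channel #1 SubChannel #5" for a subChannel created
+// under top-level channel #1.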
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
+	}
+	id := IDGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
 	}
-	id := idGen.genID()
+
 	sc := &subChannel{
 		refName: ref,
 		c:       c,
 		sockets: make(map[int64]string),
 		id:      id,
-		pid:     pid,
+		pid:     pid.Int(),
 		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid)
-	return id
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
 }
 
 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
-	id := idGen.genID()
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
+	id := IDGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
 	svr := &server{
 		refName: ref,
 		s:       s,
@@ -244,71 +235,92 @@ func RegisterServer(s Server, ref string) int64 {
 		id:      id,
 	}
 	db.get().addServer(id, svr)
-	return id
+	return newIdentifer(RefServer, id, nil)
 }
 
 // RegisterListenSocket registers the given listen socket s in channelz database
 // with ref as its reference name, and add it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a ListenSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be nil")
+	}
+	id := IDGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
 	}
-	id := idGen.genID()
-	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid)
-	return id
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
 }
 
 // RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a NormalSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be nil")
+	}
+	id := IDGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
 	}
-	id := idGen.genID()
-	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid)
-	return id
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
 }
 
 // RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
-func RemoveEntry(id int64) {
-	db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
 }
 
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
 type TraceEventDesc struct {
 	Desc     string
 	Severity Severity
 	Parent   *TraceEventDesc
 }
 
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
-	for d := desc; d != nil; d = d.Parent {
-		switch d.Severity {
-		case CtUnknown, CtInfo:
-			l.InfoDepth(depth+1, d.Desc)
-		case CtWarning:
-			l.WarningDepth(depth+1, d.Desc)
-		case CtError:
-			l.ErrorDepth(depth+1, d.Desc)
-		}
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottommost entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
 	}
+
 	if getMaxTraceEntry() == 0 {
 		return
 	}
-	db.get().traceEvent(id, desc)
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
 }
 
 // channelMap is the storage data structure for channelz.
@@ -326,6 +338,17 @@ type channelMap struct {
 	normalSockets    map[int64]*normalSocket
 }
 
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	}
+}
+
 func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Lock()
 	s.cm = c
@@ -724,14 +747,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric {
 	return sm
 }
 
-type idGenerator struct {
+// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
+type IDGenerator struct {
 	id int64
 }
 
-func (i *idGenerator) reset() {
+// Reset resets the generated ID back to zero.
Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/id.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/logging.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8..f89e6f77b 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. 
-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/types.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154..1d4020f53 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. 
delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { @@ -686,12 +691,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e..98288c3f8 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. 
-func GetSocketOption(socket interface{}) *SocketOptionData {
+func GetSocketOption(socket any) *SocketOptionData {
 	c, ok := socket.(syscall.Conn)
 	if !ok {
 		return nil
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
index 837ddc402..b5568b22e 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -22,6 +22,6 @@ package channelz
 
 // GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
+func GetSocketOption(c any) *SocketOptionData {
 	return nil
 }
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/event-exporter/vendor/google.golang.org/grpc/internal/credentials/credentials.go
index 32c9b5903..9deee7f65 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/credentials/credentials.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -25,12 +25,12 @@ import (
 type requestInfoKey struct{}
 
 // NewRequestInfoContext creates a context with ri.
-func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
 	return context.WithValue(ctx, requestInfoKey{}, ri)
 }
 
 // RequestInfoFromContext extracts the RequestInfo from ctx.
-func RequestInfoFromContext(ctx context.Context) interface{} {
+func RequestInfoFromContext(ctx context.Context) any {
 	return ctx.Value(requestInfoKey{})
 }
 
@@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} {
 type clientHandshakeInfoKey struct{}
 
 // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+func ClientHandshakeInfoFromContext(ctx context.Context) any {
 	return ctx.Value(clientHandshakeInfoKey{})
 }
 
 // NewClientHandshakeInfoContext creates a context with chi.
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
 	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
 }
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 6f0272543..685a3cb41 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -21,15 +21,49 @@
 package envconfig
 
 import (
 	"os"
+	"strconv"
 	"strings"
 )
 
-const (
-	prefix          = "GRPC_GO_"
-	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
-)
-
 var (
 	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
-	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
+	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+	// AdvertiseCompressors is set if registered compressors should be advertised
+	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
+	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
+	// RingHashCap indicates the maximum ring size which defaults to 4096
+	// entries but may be overridden by setting the environment variable
+	// "GRPC_RING_HASH_CAP".
This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..dd314cfb1 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9bad03cec..29f234acb 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,31 +20,21 @@ package envconfig import ( "os" - "strings" ) const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -60,38 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - - // XDSFederation indicates whether federation support is enabled. 
- XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/experimental.go b/event-exporter/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7f7044e17 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258..bfc45102a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. 
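Most of the churn in this and the surrounding grpclog hunks is the mechanical interface{} to any rewrite (it continues below). Since Go 1.18, any is a type alias for interface{} rather than a new type, so existing LoggerV2 implementations keep compiling unchanged. A tiny demonstration of the equivalence (names are illustrative):

package main

import "fmt"

// oldStyle and newStyle have identical types: "any" is an alias for
// "interface{}", so the compiler treats the two signatures as the same.
func oldStyle(args ...interface{}) { fmt.Println(args...) }
func newStyle(args ...any)         { fmt.Println(args...) }

func main() {
    // A variable declared with the old spelling accepts the new function.
    var f func(...interface{}) = newStyle
    f("alias, not a new type")
    oldStyle(1, 2)
    newStyle(3, 4)
}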
-func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -110,17 +110,17 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e96..faa998de7 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,10 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b..aa97273e7 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() @@ -65,3 +72,24 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. 
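Each grpcrand function added above and below follows the same shape: take the package-level mutex, delegate to the seeded math/rand source r, release. A self-contained sketch of that locked-source pattern (not the vendored package itself, which is internal to the grpc module):

package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

// lockedSource serializes access to a single *rand.Rand, mirroring how the
// grpcrand package guards its global source with a mutex.
type lockedSource struct {
    mu sync.Mutex
    r  *rand.Rand
}

func (s *lockedSource) Int31n(n int32) int32 {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.r.Int31n(n)
}

func (s *lockedSource) Shuffle(n int, swap func(i, j int)) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.r.Shuffle(n, swap)
}

func main() {
    src := &lockedSource{r: rand.New(rand.NewSource(time.Now().UnixNano()))}
    addrs := []string{"a", "b", "c", "d"}
    src.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
    fmt.Println(src.Int31n(100), addrs)
}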
+func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 000000000..f7f40a16a --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + + callbacks *buffer.Unbounded +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil +} + +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. 
+ case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } + } + + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 000000000..6635f7bca --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 000000000..aef8cec1a --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. 
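The serializer above is the building block for the PubSub type whose documentation continues below. A toy, self-contained version of the same FIFO idea, using a plain buffered channel in place of the internal buffer.Unbounded (so it lacks the drain-on-shutdown behavior of the real type):

package main

import (
    "context"
    "fmt"
)

// serializer runs scheduled callbacks one at a time, in FIFO order, on a
// single goroutine, until the context is canceled.
type serializer struct {
    callbacks chan func(context.Context)
    done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
    s := &serializer{
        callbacks: make(chan func(context.Context), 64),
        done:      make(chan struct{}),
    }
    go func() {
        defer close(s.done)
        for {
            select {
            case <-ctx.Done():
                return
            case cb := <-s.callbacks:
                cb(ctx) // callbacks never run concurrently with each other
            }
        }
    }()
    return s
}

func (s *serializer) Schedule(f func(context.Context)) { s.callbacks <- f }

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    s := newSerializer(ctx)
    for i := 0; i < 3; i++ {
        i := i
        s.Schedule(func(context.Context) { fmt.Println("callback", i) }) // prints 0, 1, 2 in order
    }
    s.Schedule(func(context.Context) { cancel() })
    <-s.done // closed once the run loop observes cancellation
}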
+// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the fields below is guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..9f4090967 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry.
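The PubSub above retains the last published message and replays it to late subscribers, with all deliveries funneled through the CallbackSerializer for ordering. A toy illustration of just the replay semantics (callbacks here run synchronously under the lock, unlike the real type, which dispatches them asynchronously in publication order):

package main

import (
    "fmt"
    "sync"
)

// lastValuePubSub replays the most recent message to new subscribers, the
// same observable behavior as the vendored PubSub minus the serializer.
type lastValuePubSub struct {
    mu   sync.Mutex
    msg  any
    subs []func(any)
}

func (ps *lastValuePubSub) Subscribe(onMessage func(any)) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.subs = append(ps.subs, onMessage)
    if ps.msg != nil {
        onMessage(ps.msg) // replay the last message to the late subscriber
    }
}

func (ps *lastValuePubSub) Publish(msg any) {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.msg = msg
    for _, s := range ps.subs {
        s(msg)
    }
}

func main() {
    ps := &lastValuePubSub{}
    ps.Publish("READY")
    ps.Subscribe(func(m any) { fmt.Println("subscriber saw:", m) }) // still sees "READY"
    ps.Publish("IDLE")
}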
+func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060..ec62b4775 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on its own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/idle/idle.go b/event-exporter/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 000000000..fe49cb74c --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
+ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. 
But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would get the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here.
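The sentinel arithmetic used above, and in the remainder of ExitIdleMode just below, is worth spelling out: swinging activeCallsCount to -math.MaxInt32 lets OnCallBegin detect idleness from the sign of a single atomic add. A self-contained sketch of that trick:

package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

// The idle manager marks "idle (or moving to idle)" by swinging the active
// call counter to -math.MaxInt32; an RPC sees a non-positive result from its
// AddInt32 and knows it must first exit idle mode.
func main() {
    var activeCalls int32

    // Timer path: the channel is quiescent, so try to claim idleness.
    claimed := atomic.CompareAndSwapInt32(&activeCalls, 0, -math.MaxInt32)
    fmt.Println("entered idle:", claimed) // true

    // RPC path: increment; a negative result means "channel is idle".
    if n := atomic.AddInt32(&activeCalls, 1); n <= 0 {
        fmt.Println("must exit idle before proceeding, count =", n)
        // Exiting idle undoes the sentinel while preserving this RPC's +1.
        atomic.AddInt32(&activeCalls, math.MaxInt32)
    }
    fmt.Println("active calls now:", atomic.LoadInt32(&activeCalls)) // 1
}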
+ return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/internal.go b/event-exporter/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf35..6c7ea6a53 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/internal.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,11 +38,14 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -50,20 +53,149 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. 
- GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. 
+ JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. 
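Almost every hook in this registry (continued below) is declared as a bare any with its real function type recorded in a trailing comment. The pattern exists to break import cycles: the grpc package stores a concrete function into the variable, and consumers type-assert it back out. A minimal illustration of the pattern, with hypothetical names:

package main

import "fmt"

// canonicalString stands in for one of the any-typed hooks above; the comment
// documents the function type that producers are expected to store.
var canonicalString any // func(code int) string

func init() {
    // Imagine this assignment living in the producing package (e.g. grpc).
    canonicalString = func(code int) string { return fmt.Sprintf("CODE(%d)", code) }
}

func main() {
    // The consumer type-asserts the hook back to its documented signature.
    f := canonicalString.(func(int) string)
    fmt.Println(f(14)) // CODE(14)
}

The cost of the pattern is that the compiler cannot check the signature; a mismatch only surfaces as a panic at the assertion site, which is why each declaration carries its type in a comment.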
+ ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -74,7 +206,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. @@ -86,3 +218,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/event-exporter/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf3..900bfb716 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -32,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false @@ -72,3 +75,58 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate validates every pair in md with ValidatePair. +func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // index the bytes directly; a for-range loop would incur rune conversions + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped): +// +// - key must contain one or more characters.
+// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key; index the bytes directly, since a for-range loop would incur rune conversions + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/event-exporter/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..703319137 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshaling fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e any) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as a string.
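Tying the metadata rules above together: keys are lowercase [0-9a-z-_.], values must stay in printable ASCII, and "-bin" keys are exempt because the transport base64-encodes their values anyway. A self-contained sketch of a validator with those rules (a simplification of the vendored ValidatePair, which additionally rejects empty keys and skips pseudo-headers):

package main

import (
    "fmt"
    "strings"
)

// validatePair applies the key and value rules described above.
func validatePair(key string, vals ...string) error {
    for i := 0; i < len(key); i++ {
        r := key[i]
        if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
            return fmt.Errorf("header key %q contains illegal characters", key)
        }
    }
    if strings.HasSuffix(key, "-bin") {
        return nil // binary metadata is base64-encoded on the wire
    }
    for _, val := range vals {
        for i := 0; i < len(val); i++ {
            if val[i] < 0x20 || val[i] > 0x7E {
                return fmt.Errorf("header key %q has a non-printable value", key)
            }
        }
    }
    return nil
}

func main() {
    fmt.Println(validatePair("user-agent", "grpc-go/1.x")) // <nil>
    fmt.Println(validatePair("trace-bin", "\x00\x01"))     // <nil>: -bin values are skipped
    fmt.Println(validatePair("bad-key", "a\x00b"))         // error: non-printable value
}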
+func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948..f0603871c 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c514..b66dcb213 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,53 +47,37 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. 
"::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +87,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -114,9 +98,10 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -140,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.Authority) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -159,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -176,23 +151,26 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. 
[Race Example] in dns_resolver_test we
- // replace the real lookup functions with mocked ones to facilitate testing.
- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
- // will warns lookup (READ the lookup function pointers) inside watcher() goroutine
- // has data race with replaceNetFunc (WRITE the lookup function pointers).
+ // wg is used to enforce Close() to return after the watcher() goroutine has
+ // finished. Otherwise, a data race would be possible. [Race Example] in
+ // dns_resolver_test we replace the real lookup functions with mocked ones to
+ // facilitate testing. If Close() doesn't wait for the watcher() goroutine to
+ // finish, the race detector sometimes warns that lookup (a READ of the lookup
+ // function pointers) inside the watcher() goroutine has a data race with
+ // replaceNetFunc (a WRITE of the lookup function pointers).
 wg sync.WaitGroup
 disableServiceConfig bool
 }

-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
 func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 select {
 case d.rn <- struct{}{}:
@@ -218,28 +196,27 @@ func (d *dnsResolver) watcher() {
 err = d.cc.UpdateState(*state)
 }

- var timer *time.Timer
+ var waitTime time.Duration
 if err == nil {
- // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
- // to prevent constantly re-resolving.
+ // Resolved successfully; wait for the next ResolveNow. However, also wait 30
+ // seconds at the very least to prevent constantly re-resolving.
 backoffIndex = 1
- timer = newTimerDNSResRate(minDNSResRate)
+ waitTime = internal.MinResolutionRate
 select {
 case <-d.ctx.Done():
- timer.Stop()
 return
 case <-d.rn:
 }
 } else {
- // Poll on an error found in DNS Resolver or an error received from ClientConn.
- timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+ // Poll on an error found in DNS Resolver or an error received from
+ // ClientConn.
+ waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
 backoffIndex++
 }
 select {
 case <-d.ctx.Done():
- timer.Stop()
 return
- case <-timer.C:
+ case <-internal.TimeAfterFunc(waitTime):
 }
 }
}
@@ -278,7 +255,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
 }

 func handleDNSError(err error, lookupType string) error {
- if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+ dnsErr, ok := err.(*net.DNSError)
+ if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
 // Timeouts and temporary errors should be communicated to gRPC to
 // attempt another DNS query (with backoff). Other errors should be
 // suppressed (they may represent the absence of a TXT record).
@@ -307,10 +285,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
 res += s
 }

- // TXT record must have "grpc_config=" attribute in order to be used as service config.
+ // TXT record must have "grpc_config=" attribute in order to be used as
+ // service config.
 if !strings.HasPrefix(res, txtAttribute) {
 logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
- // This is not an error; it is the equivalent of not having a service config.
+ // This is not an error; it is the equivalent of not having a service
+ // config.
return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +332,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +347,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -377,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -385,12 +366,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c7fc557d0 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+ LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+ LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+ // ErrMissingAddr is the error returned when building a DNS resolver when
+ // the provided target name is empty.
+ ErrMissingAddr = errors.New("dns resolver: missing address")
+
+ // ErrEndsWithColon is the error returned when building a DNS resolver when
+ // the provided target name ends with a colon that is supposed to be the
+ // separator between host and port. E.g. "::" is a valid address as it is
+ // an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+ // colon as the host and port separator.
+ ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+ // MinResolutionRate is the minimum rate at which re-resolutions are
+ // allowed. This helps to prevent excessive re-resolution.
+ MinResolutionRate = 30 * time.Second
+
+ // TimeAfterFunc is used by the DNS resolver to wait for the given duration
+ // to elapse. In non-test code, this is implemented by time.After. In test
+ // code, this can be used to control the amount of time the resolver is
+ // blocked waiting for the duration to elapse.
+ TimeAfterFunc func(time.Duration) <-chan time.Time
+
+ // NewNetResolver returns the net.Resolver instance for the given target.
+ NewNetResolver func(string) (NetResolver, error)
+
+ // AddressDialer is the dialer used to dial the DNS server. It accepts the
+ // Host portion of the URL corresponding to the user's dial target and
+ // returns a dial function.
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
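Aside (illustrative, not part of the vendored code): the package-level vars just added exist purely as test seams. A minimal sketch of how a test inside the gRPC module might stub internal.TimeAfterFunc to observe the watcher's wait duration; the test name and channel plumbing here are hypothetical:

package dns_test

import (
	"testing"
	"time"

	"google.golang.org/grpc/internal/resolver/dns/internal"
)

func TestWatcherWaitsMinResolutionRate(t *testing.T) {
	durCh := make(chan time.Duration, 1)
	origTimeAfter := internal.TimeAfterFunc
	// Record the duration the watcher asked to wait for, then fire
	// immediately so the test itself never sleeps.
	internal.TimeAfterFunc = func(d time.Duration) <-chan time.Time {
		select {
		case durCh <- d:
		default:
		}
		return time.After(0)
	}
	defer func() { internal.TimeAfterFunc = origTimeAfter }()

	// ... build the dns resolver and trigger a successful re-resolution ...

	if got := <-durCh; got != internal.MinResolutionRate {
		t.Errorf("watcher waited %v, want %v", got, internal.MinResolutionRate)
	}
}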
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index 520d9229e..afac56572 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -20,13 +20,20 @@
 // name without scheme back to gRPC as resolved address.
 package passthrough

-import "google.golang.org/grpc/resolver"
+import (
+ "errors"
+
+ "google.golang.org/grpc/resolver"
+)

 const scheme = "passthrough"

 type passthroughBuilder struct{}

 func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ if target.Endpoint() == "" && opts.Dialer == nil {
+ return nil, errors.New("passthrough: received empty target in Build()")
+ }
 r := &passthroughResolver{
 target: target,
 cc: cc,
@@ -45,7 +52,7 @@ type passthroughResolver struct {
 }

 func (r *passthroughResolver) start() {
- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
+ r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
 }

 func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 20852e59d..27cd81af9 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -34,8 +34,8 @@ type builder struct {
 }

 func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
- if target.Authority != "" {
- return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
+ if target.URL.Host != "" {
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
 }

 // gRPC was parsing the dial target manually before PR #4817, and we
@@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv
 }
 addr := resolver.Address{Addr: endpoint}
 if b.scheme == unixAbstractScheme {
- // prepend "\x00" to address for unix-abstract
- addr.Addr = "\x00" + addr.Addr
+ // We cannot prepend \0 as C++ gRPC does, because in Go a leading '@' is used
+ // to signify that we do not want a trailing \0 in the address.
+ addr.Addr = "@" + addr.Addr
 }
 cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
 return &nopResolver{}, nil
@@ -60,6 +61,10 @@ func (b *builder) Scheme() string {
 return b.scheme
 }

+func (b *builder) OverrideAuthority(resolver.Target) string {
+ return "localhost"
+}
+
 type nopResolver struct {
 }

diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 000000000..11d82afcc
--- /dev/null
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. 
+ const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf59..51e733e49 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/status/status.go b/event-exporter/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513ed..03ef2fedd 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/status/status.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). 
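Aside (illustrative, not part of the vendored code): the serviceconfig.Duration type added above always emits seconds with 0, 3, 6, or 9 fractional digits and a trailing "s". A standalone sketch that mirrors the MarshalJSON trimming logic; marshalDuration is a hypothetical helper, not an exported API:

package main

import (
	"fmt"
	"strings"
	"time"
)

// marshalDuration mirrors the vendored Duration.MarshalJSON trimming logic.
func marshalDuration(d time.Duration) string {
	ns := d.Nanoseconds()
	sec := ns / int64(time.Second)
	ns = ns % int64(time.Second)
	var sign string
	if sec < 0 || ns < 0 {
		sign, sec, ns = "-", -sec, -ns
	}
	// Start with 9 fractional digits, then drop trailing groups of three.
	str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
	str = strings.TrimSuffix(str, "000")
	str = strings.TrimSuffix(str, "000")
	str = strings.TrimSuffix(str, ".000")
	return fmt.Sprintf("%q", str+"s")
}

func main() {
	fmt.Println(marshalDuration(1500 * time.Millisecond)) // "1.500s"
	fmt.Println(marshalDuration(time.Second))             // "1s"
	fmt.Println(marshalDuration(-time.Millisecond))       // "-0.001s"
}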
-func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { @@ -164,3 +192,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 000000000..4f347edd4 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000..078137b7f --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
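Aside (illustrative): a usage sketch for the dialer just defined; the target address is made up, and since internal packages are only importable from inside the grpc module, out-of-tree code would have to hand-roll an equivalent net.Dialer. The windows and fallback variants that follow are drop-in equivalents on their platforms.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/internal"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// The resulting connection has SO_KEEPALIVE enabled with OS defaults.
	conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", "localhost:50051")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected from", conn.LocalAddr())
}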
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 000000000..fd7d43a89
--- /dev/null
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 8394d252d..83c382982 100644
--- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -22,6 +22,7 @@ import (
 "bytes"
 "errors"
 "fmt"
+ "net"
 "runtime"
 "strconv"
 "sync"
@@ -29,6 +30,7 @@ import (
 "golang.org/x/net/http2"
 "golang.org/x/net/http2/hpack"

+ "google.golang.org/grpc/internal/grpclog"
 "google.golang.org/grpc/internal/grpcutil"
 "google.golang.org/grpc/status"
 )
@@ -38,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
 }

 type itemNode struct {
- it interface{}
+ it any
 next *itemNode
 }
@@ -47,7 +49,7 @@ type itemList struct {
 tail *itemNode
 }

-func (il *itemList) enqueue(i interface{}) {
+func (il *itemList) enqueue(i any) {
 n := &itemNode{it: i}
 if il.tail == nil {
 il.head, il.tail = n, n
@@ -59,11 +61,11 @@
 // peek returns the first item in the list without removing it from the
 // list.
-func (il *itemList) peek() interface{} {
+func (il *itemList) peek() any {
 return il.head.it
 }

-func (il *itemList) dequeue() interface{} {
+func (il *itemList) dequeue() any {
 if il.head == nil {
 return nil
 }
@@ -137,6 +139,7 @@ type earlyAbortStream struct {
 streamID uint32
 contentSubtype string
 status *status.Status
+ rst bool
 }

 func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@@ -190,7 +193,7 @@ type goAway struct {
 code http2.ErrCode
 debugData []byte
 headsUp bool
- closeConn bool
+ closeConn error // if set, loopyWriter will exit, resulting in conn closure
 }

 func (*goAway) isTransportResponseFrame() bool { return false }
@@ -208,6 +211,14 @@ type outFlowControlSizeRequest struct {
 }

 func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }

+// closeConnection is an instruction to tell the loopy writer to flush the
+// framer and exit, which will cause the transport's connection to be closed
+// (by the client or server). The transport itself will close after the reader
+// encounters the EOF caused by the connection closure.
+type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -325,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -362,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -376,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -407,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -477,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -495,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -512,23 +527,25 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. 
User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } + if !isIOError(err) { + l.framer.writer.Flush() + } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -573,7 +590,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -582,11 +598,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -594,10 +610,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -605,13 +620,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -619,15 +632,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -654,19 +666,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -681,8 +694,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -720,10 +733,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -732,7 +745,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -743,9 +755,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -762,8 +773,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -786,6 +798,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -793,7 +810,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it. 
+ return errors.New("received GOAWAY with no active streams") } } return nil @@ -811,10 +829,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -824,7 +842,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -832,19 +850,24 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. + return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -863,7 +886,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -880,9 +902,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true @@ -897,7 +919,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/defaults.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e..bc8ee0747 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. 
It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b..a9d70e2a1 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,40 +47,65 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +123,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -120,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -138,16 +168,26 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. 
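Aside (illustrative): the handler-based transport in this file is what backs (*grpc.Server).ServeHTTP, letting gRPC share a net/http server that speaks HTTP/2. A sketch of that wiring; the port and the use of h2c (HTTP/2 without TLS, chosen only to keep the example self-contained) are assumptions, and production setups would normally serve TLS instead:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Every gRPC request that arrives here is served through a
	// serverHandlerTransport built by NewServerHandlerTransport.
	mux := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gs.ServeHTTP(w, r)
	})
	log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(mux, &http2.Server{})))
}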
@@ -202,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -225,18 +267,19 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -269,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -314,10 +357,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -326,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
-
- ctx := ht.req.Context()
 var cancel context.CancelFunc
 if ht.timeoutSet {
 ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -346,37 +387,22 @@
 case <-ht.req.Context().Done():
 }
 cancel()
- ht.Close()
+ ht.Close(errors.New("request is done processing"))
 }()

+ ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 req := ht.req
- s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
- }
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- if ht.stats != nil {
- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- ht.stats.HandleRPC(s.ctx, inHeader)
+ s := &Stream{
+ id: 0, // irrelevant
+ ctx: ctx,
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
 }
 s.trReader = &transportReader{
 reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
@@ -435,17 +461,17 @@
 func (ht *serverHandlerTransport) IncrMsgSent() {}

 func (ht *serverHandlerTransport) IncrMsgRecv() {}

-func (ht *serverHandlerTransport) Drain() {
+func (ht *serverHandlerTransport) Drain(debugData string) {
 panic("Drain() is not implemented")
 }

 // mapRecvMsgError returns the non-nil err into the appropriate
 // error value as expected by callers of *grpc.parser.recvMsg.
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d337..eff879964 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,11 +36,15 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/internal/syscall" + istatus "google.golang.org/grpc/internal/status" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -55,13 +59,19 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -78,6 +88,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -90,7 +101,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -98,17 +109,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. 
mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -132,15 +141,15 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -170,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -192,7 +201,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -212,14 +221,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. 
if kp.Time == 0 { @@ -230,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -251,15 +286,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -297,6 +324,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -304,26 +333,27 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -341,38 +371,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -392,14 +434,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -410,17 +450,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. 
+ t.conn.Close() } - // Do not close the transport. Let reader goroutine handle it since - // there might be data in the buffers. - t.conn.Close() - t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -465,8 +502,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -502,9 +540,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -525,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -584,7 +635,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -613,7 +668,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -629,18 +691,17 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +710,23 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -675,17 +748,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. 
- err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -702,7 +771,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } firstTry := true var ch chan struct{} - checkForStreamQuota := func(it interface{}) bool { + transportDrainRequired := false + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -717,8 +787,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -728,7 +810,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -743,23 +825,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + success, err := t.controlBuf.executeAndPut(func(it any) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +844,38 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. 
- outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } - t.statsHandler.HandleRPC(s.ctx, outHeader) + t.GracefulClose() } return s, nil } @@ -851,7 +937,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -872,20 +958,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -898,9 +985,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
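Throughout this file the single statsHandler field becomes a statsHandlers slice, and every event site now fans out to all registered handlers, as in the loop above. A toy illustration of that fan-out against the public stats.Handler interface; logHandler is a made-up type, not part of grpc-go.

    package main

    import (
    	"context"
    	"fmt"

    	"google.golang.org/grpc/stats"
    )

    // logHandler is a toy stats.Handler that prints every event it sees.
    type logHandler struct{ name string }

    func (h logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
    	return ctx
    }
    func (h logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
    	fmt.Printf("%s: RPC event %T\n", h.name, s)
    }
    func (h logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
    	return ctx
    }
    func (h logHandler) HandleConn(_ context.Context, s stats.ConnStats) {
    	fmt.Printf("%s: conn event %T\n", h.name, s)
    }

    func main() {
    	handlers := []stats.Handler{logHandler{"metrics"}, logHandler{"tracing"}}
    	ctx := context.Background()
    	// The transport now loops like this at each event site, letting every
    	// handler tag the context before any of them receives events.
    	for _, sh := range handlers {
    		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{})
    	}
    	for _, sh := range handlers {
    		sh.HandleConn(ctx, &stats.ConnBegin{Client: true})
    	}
    }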
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +1002,11 @@ for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -937,11 +1022,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1001,13 +1090,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1098,8 +1187,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1154,7 +1243,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1181,10 +1270,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configured KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1213,12 +1304,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection.
- t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1226,24 +1319,33 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + streamsToClose = append(streamsToClose, stream) } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1305,7 +1407,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1340,12 +1441,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1411,14 +1506,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. 
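The reworked handleGoAway above is deliberate about lock ordering: it collects the affected streams while holding t.mu and only calls closeStream after unlocking, because closeStream can take the control buffer's mutex, and taking that mutex under t.mu could deadlock. The same collect-then-act pattern in isolation, with hypothetical names, might look like this:

    package main

    import "sync"

    type stream struct{ id uint32 }

    type transport struct {
    	mu     sync.Mutex
    	active map[uint32]*stream
    }

    // closeStream stands in for a callback that may acquire other locks
    // (e.g. a control buffer's mutex) and so must never run under t.mu.
    func (t *transport) closeStream(s *stream) {}

    // killStreamsAbove closes every stream with an ID greater than lastGoodID.
    func (t *transport) killStreamsAbove(lastGoodID uint32) {
    	t.mu.Lock()
    	var toClose []*stream
    	for id, s := range t.active {
    		if id > lastGoodID {
    			toClose = append(toClose, s)
    		}
    	}
    	t.mu.Unlock()
    	// Act outside the lock, as handleGoAway now does, so the two mutexes
    	// are never held at the same time and cannot deadlock.
    	for _, s := range toClose {
    		t.closeStream(s)
    	}
    }

    func main() {
    	t := &transport{active: map[uint32]*stream{1: {1}, 5: {5}, 7: {7}}}
    	t.killStreamsAbove(1)
    }

The deferred t.controlBuf.put(&incomingGoAway{}) in the same hunk exists for the same reason: t.mu is still held at that point.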
@@ -1426,29 +1522,26 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } - if t.statsHandler != nil { - if isHeader { + for _, sh := range t.statsHandlers { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } @@ -1456,42 +1549,43 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. 
for { @@ -1694,3 +1788,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e5..a206e2eef 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,12 +35,16 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,10 +56,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -64,25 +68,22 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -101,13 +102,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. 
- // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -117,7 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -127,6 +128,8 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -159,21 +162,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. 
- maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -231,28 +229,35 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - maxStreams: maxStreams, + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -260,6 +265,8 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -267,25 +274,17 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) - } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -321,23 +320,34 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). 
This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + select { + case <-t.readerDone: + case <-time.After(time.Second): } + t.conn.Close() } - t.conn.Close() - t.controlBuf.finish() - close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -353,33 +363,32 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. 
- isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -390,11 +399,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -405,23 +426,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." - A41 case "connection": - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -435,26 +456,47 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. 
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if logger.V(logLevel) { - logger.Errorf("transport: %v", errMsg) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) - return false + return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -475,18 +517,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -501,7 +536,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -512,28 +547,30 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -544,8 +581,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -560,19 +598,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), - } - t.stats.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -592,22 +617,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -625,19 +653,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(ctx, frame, handle); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -653,8 +678,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -821,7 +846,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -838,8 +863,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -881,10 +906,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -908,7 +930,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -916,8 +938,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -925,12 +947,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -939,10 +976,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } - s.hdrMu.Unlock() return nil } @@ -973,14 +1013,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -990,17 +1030,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. 
if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1012,12 +1054,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1029,7 +1074,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -1041,10 +1086,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1101,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1116,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1136,20 +1165,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. 
- t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1165,10 +1194,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1195,40 +1221,33 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) - } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { - connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1269,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1293,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
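The keepalive loop above is what acts on these settings: MaxConnectionIdle triggers Drain("max_idle"), MaxConnectionAge triggers Drain("max_age") followed by a close after the grace period, and an unacked keepalive ping closes the transport. The knobs come from the public keepalive package; a sketch of a server configured with them (the durations are arbitrary examples):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	s := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle:     5 * time.Minute,  // idle connections are drained
		MaxConnectionAge:      30 * time.Minute, // old connections are drained
		MaxConnectionAgeGrace: 10 * time.Second, // then closed after this grace period
		Time:                  2 * time.Hour,    // ping after this much inactivity
		Timeout:               20 * time.Second, // close if the ping is not acked in time
	}))
	defer s.Stop()
}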
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1280,18 +1309,14 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1311,19 +1336,18 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + t.framer.writer.Flush() + if retErr != nil { + return false, retErr } return true, nil } @@ -1335,17 +1359,17 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1368,11 +1392,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1404,6 +1428,15 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { + return &peer.Peer{ + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 @@ -1425,6 +1458,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. 
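outgoingGoAwayHandler above implements graceful drain as two GOAWAY frames: a "heads up" frame advertising MaxUint32 plus a ping, then, once the ping is acked via the drain event (or the now 5-second timer fires), a final GOAWAY carrying the highest stream ID the server will still process. A rough sketch of that frame sequence using x/net/http2's Framer directly, writing into a buffer instead of a live connection; the stream ID and debug string are made up:

package main

import (
	"bytes"
	"fmt"
	"math"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil) // write-only use; no reader needed

	// Step 1: warn the client; all in-flight stream IDs are still accepted.
	_ = fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte("graceful_stop"))
	_ = fr.WritePing(false, [8]byte{1, 6, 1, 8, 0, 3, 3, 9})

	// Step 2, after the ping ack or a timeout: commit to the real limit.
	_ = fr.WriteGoAway(7, http2.ErrCodeNo, []byte("graceful_stop"))

	fmt.Printf("wrote %d bytes of HTTP/2 frames\n", buf.Len())
}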
This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http_util.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf..dc29d590e 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,8 +20,8 @@ package transport import ( "bufio" - "bytes" "encoding/base64" + "errors" "fmt" "io" "math" @@ -30,29 +30,20 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -92,9 +83,10 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -110,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -161,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -257,13 +236,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +251,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
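The encodeGrpcMessage/decodeGrpcMessage pair being reworked here implements gRPC's percent-encoding for the grpc-message header: printable ASCII passes through, everything else becomes %XX per UTF-8 byte. A simplified, byte-wise sketch of the scheme (the real encoder walks runes so it can also replace invalid UTF-8, which this version skips):

package main

import (
	"fmt"
	"strings"
)

func percentEncode(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		b := msg[i]
		if b >= ' ' && b <= '~' && b != '%' { // printable ASCII, minus '%'
			sb.WriteByte(b)
		} else {
			fmt.Fprintf(&sb, "%%%02X", b)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(percentEncode("café: 100%")) // caf%C3%A9: 100%25
}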
 			if b >= spaceByte && b <= tildeByte && b != percentByte {
-				buf.WriteByte(b)
+				sb.WriteByte(b)
 			} else {
-				buf.WriteString(fmt.Sprintf("%%%02X", b))
+				fmt.Fprintf(&sb, "%%%02X", b)
 			}
 		}
 		msg = msg[size:]
 	}
-	return buf.String()
+	return sb.String()
 }
 
 // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
@@ -297,41 +276,45 @@ func decodeGrpcMessage(msg string) string {
 }
 
 func decodeGrpcMessageUnchecked(msg string) string {
-	var buf bytes.Buffer
+	var sb strings.Builder
 	lenMsg := len(msg)
 	for i := 0; i < lenMsg; i++ {
 		c := msg[i]
 		if c == percentByte && i+2 < lenMsg {
 			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
 			if err != nil {
-				buf.WriteByte(c)
+				sb.WriteByte(c)
 			} else {
-				buf.WriteByte(byte(parsed))
+				sb.WriteByte(byte(parsed))
 				i += 2
 			}
 		} else {
-			buf.WriteByte(c)
+			sb.WriteByte(c)
 		}
 	}
-	return buf.String()
+	return sb.String()
 }
 
 type bufWriter struct {
+	pool      *sync.Pool
 	buf       []byte
 	offset    int
 	batchSize int
 	conn      net.Conn
 	err       error
-
-	onFlush func()
 }
 
-func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
-	return &bufWriter{
-		buf:       make([]byte, batchSize*2),
+func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
	w := &bufWriter{
 		batchSize: batchSize,
 		conn:      conn,
+		pool:      pool,
+	}
+	// A nil pool indicates that a non-shared, per-connection buffer should be used.
+	if pool == nil {
+		w.buf = make([]byte, batchSize)
 	}
+	return w
 }
 
 func (w *bufWriter) Write(b []byte) (n int, err error) {
@@ -339,7 +322,12 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
 		return 0, w.err
 	}
 	if w.batchSize == 0 { // Buffer has been disabled.
-		return w.conn.Write(b)
+		n, err = w.conn.Write(b)
+		return n, toIOError(err)
+	}
+	if w.buf == nil {
+		b := w.pool.Get().(*[]byte)
+		w.buf = *b
 	}
 	for len(b) > 0 {
 		nn := copy(w.buf[w.offset:], b)
@@ -347,33 +335,64 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
 		w.offset += nn
 		n += nn
 		if w.offset >= w.batchSize {
-			err = w.Flush()
+			err = w.flushKeepBuffer()
 		}
 	}
 	return n, err
 }
 
 func (w *bufWriter) Flush() error {
+	err := w.flushKeepBuffer()
+	// Only release the buffer if we are in a "shared" mode
+	if w.buf != nil && w.pool != nil {
+		b := w.buf
+		w.pool.Put(&b)
+		w.buf = nil
+	}
+	return err
+}
+
+func (w *bufWriter) flushKeepBuffer() error {
 	if w.err != nil {
 		return w.err
 	}
 	if w.offset == 0 {
 		return nil
 	}
-	if w.onFlush != nil {
-		w.onFlush()
-	}
 	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.err = toIOError(w.err)
 	w.offset = 0
 	return w.err
 }
 
+type ioError struct {
+	error
+}
+
+func (i ioError) Unwrap() error {
+	return i.error
+}
+
+func isIOError(err error) bool {
+	return errors.As(err, &ioError{})
+}
+
+func toIOError(err error) error {
+	if err == nil {
+		return nil
+	}
+	return ioError{error: err}
+}
+
 type framer struct {
 	writer *bufWriter
 	fr     *http2.Framer
 }
 
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferMutex sync.Mutex
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
 	if writeBufferSize < 0 {
 		writeBufferSize = 0
 	}
@@ -381,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
 	if readBufferSize > 0 {
 		r = bufio.NewReaderSize(r, readBufferSize)
 	}
-	w := newBufWriter(conn, writeBufferSize)
+	var pool *sync.Pool
+	if sharedWriteBuffer {
+		pool = getWriteBufferPool(writeBufferSize)
+	}
+	w := newBufWriter(conn, writeBufferSize, pool)
 	f := &framer{
 		writer: w,
 		fr:
http2.NewFramer(w, r), @@ -395,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/logging.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 000000000..42ed2b07a --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/proxy.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987..24fa10325 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. 
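getWriteBufferPool above (in http_util.go) keeps one sync.Pool per requested buffer size, and it pools *[]byte rather than []byte so that Put does not allocate a fresh interface box. The same pattern in isolation, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

var (
	poolsMu sync.Mutex
	pools   = make(map[int]*sync.Pool)
)

// poolFor returns a process-wide *[]byte pool for the given size, creating
// it on first use, in the spirit of getWriteBufferPool.
func poolFor(size int) *sync.Pool {
	poolsMu.Lock()
	defer poolsMu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	pools[size] = p
	return p
}

func main() {
	p := poolFor(64 * 1024)
	buf := p.Get().(*[]byte)
	n := copy(*buf, "scratch data")
	p.Put(buf) // return the buffer for reuse by other connections
	fmt.Println(n)
}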
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/event-exporter/vendor/google.golang.org/grpc/internal/transport/transport.go b/event-exporter/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2b..b7b8fec18 100644 --- a/event-exporter/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/event-exporter/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,8 +34,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -51,7 +53,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -252,6 +254,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -261,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -340,8 +346,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. 
+func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -365,9 +387,11 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() - if !s.headerValid { + + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } + return s.header.Copy(), nil } @@ -403,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -415,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -522,14 +558,15 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + SharedWriteBuffer bool + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +589,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -562,8 +599,10 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -572,8 +611,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -673,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -690,13 +729,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -706,7 +745,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, @@ -741,6 +780,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/event-exporter/vendor/google.golang.org/grpc/metadata/metadata.go b/event-exporter/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819..1e9485fd6 100644 --- a/event-exporter/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/event-exporter/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -41,16 +47,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. 
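The new ConnectionError.Unwrap above, like the ioError wrapper added in http_util.go, lets callers see through transport wrappers with errors.Is and errors.As. A small self-contained illustration of why defining Unwrap matters, using a stand-in type rather than the real ConnectionError:

package main

import (
	"errors"
	"fmt"
	"os"
)

type connErr struct {
	desc string
	err  error
}

func (e connErr) Error() string { return "connection error: " + e.desc }

// Unwrap exposes the wrapped error to errors.Is and errors.As.
func (e connErr) Unwrap() error { return e.err }

func main() {
	err := connErr{desc: "write failed", err: os.ErrDeadlineExceeded}
	fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true, thanks to Unwrap
}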
// // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +69,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,7 +82,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -89,7 +97,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. @@ -147,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -169,8 +183,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } @@ -182,27 +199,58 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. 
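The metadata changes above pre-size the maps and make Copy (and FromIncomingContext) copy value slices defensively, so callers can no longer alias the stored metadata by accident. Typical use of the public API, which is unchanged by this patch:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("Authorization", "Bearer abc") // keys are lowercased
	fmt.Println(md.Get("authorization"))                // [Bearer abc]

	clone := md.Copy()
	clone.Append("authorization", "Bearer xyz")
	// Copy now deep-copies the value slices, so the clone's data no longer
	// aliases md's.
	fmt.Println(len(md.Get("authorization")), len(clone.Get("authorization"))) // 1 2
}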
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil
+	}
+
+	if v, ok := md[key]; ok {
+		return copyOf(v)
+	}
+	for k, v := range md {
+		// Case insensitive comparison: MD is a map, and there's no guarantee
+		// that the MD attached to the context is created using our helper
+		// functions.
+		if strings.EqualFold(k, key) {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
 //
 // Remember to perform strings.ToLower on the keys, for both the returned MD (MD
 // is a map, there's no guarantee it's created using our helper functions) and
 // the extra kv pairs (AppendToOutgoingContext doesn't turn them into
 // lowercase).
-//
-// This is intended for gRPC-internal use ONLY. Users should use
-// FromOutgoingContext instead.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
 		return nil, nil, false
@@ -220,13 +268,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
 		return nil, false
 	}
 
-	out := MD{}
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
+	}
+
+	out := make(MD, mdSize)
 	for k, v := range raw.md {
 		// We need to manually convert all keys to lower case, because MD is a
 		// map, and there's no guarantee that the MD attached to the context is
 		// created using our helper functions.
 		key := strings.ToLower(k)
-		out[key] = v
+		out[key] = copyOf(v)
 	}
 	for _, added := range raw.added {
 		if len(added)%2 == 1 {
diff --git a/event-exporter/vendor/google.golang.org/grpc/peer/peer.go b/event-exporter/vendor/google.golang.org/grpc/peer/peer.go
index e01d219ff..a821ff9b2 100644
--- a/event-exporter/vendor/google.golang.org/grpc/peer/peer.go
+++ b/event-exporter/vendor/google.golang.org/grpc/peer/peer.go
@@ -32,6 +32,8 @@ import (
 type Peer struct {
 	// Addr is the peer address.
 	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
 	// AuthInfo is the authentication information of the transport.
 	// It is nil if there is no transport security being used.
 	AuthInfo credentials.AuthInfo
diff --git a/event-exporter/vendor/google.golang.org/grpc/picker_wrapper.go b/event-exporter/vendor/google.golang.org/grpc/picker_wrapper.go
index e8367cb89..bf56faa76 100644
--- a/event-exporter/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/event-exporter/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -26,21 +26,27 @@ import (
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 )
 
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblock when there's a picker update.
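The new ValueFromIncomingContext added in the metadata hunk above avoids copying the entire MD when a handler needs only one key: the lowercase fast path hits the map directly, and other spellings fall back to a strings.EqualFold scan. A sketch of its use, with the signature taken from the patch:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("x-request-id", "42"))

	// Case-insensitive single-key lookup; no full MD copy is made.
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "X-Request-Id")) // [42]
}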
type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -81,15 +91,16 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -110,28 +121,45 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. 
+ if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,19 +167,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. @@ -175,3 +204,20 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/event-exporter/vendor/google.golang.org/grpc/pickfirst.go b/event-exporter/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b0..5128f9364 100644 --- a/event-exporter/vendor/google.golang.org/grpc/pickfirst.go +++ b/event-exporter/vendor/google.golang.org/grpc/pickfirst.go @@ -19,15 +19,24 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -36,97 +45,182 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if b.subConn == nil { + b.state = connectivity.TransientFailure } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. 
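The pfConfig type above gives pick_first its first LB policy configuration, with a shuffleAddressList knob parsed from the service config. A hedged sketch of a client opting in; the target is a placeholder, and whether shuffling takes effect can additionally be gated by an environment flag depending on the grpc-go release:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///backend.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(
			`{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}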
+ b.subConn.Shutdown() + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } - if b.sc != sc { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +229,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. 
type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/event-exporter/vendor/google.golang.org/grpc/preloader.go b/event-exporter/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad..73bd63364 100644 --- a/event-exporter/vendor/google.golang.org/grpc/preloader.go +++ b/event-exporter/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/event-exporter/vendor/google.golang.org/grpc/regenerate.sh b/event-exporter/vendor/google.golang.org/grpc/regenerate.sh index 58c802f8a..a6f26c8ab 100644 --- a/event-exporter/vendor/google.golang.org/grpc/regenerate.sh +++ b/event-exporter/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -68,7 +69,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +80,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). 
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -117,15 +116,8 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/event-exporter/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/event-exporter/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..14aa6f20a --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. 
+func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/event-exporter/vendor/google.golang.org/grpc/resolver/map.go b/event-exporter/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0ee..ada5b9bb7 100644 --- a/event-exporter/vendor/google.golang.org/grpc/resolver/map.go +++ b/event-exporter/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -54,8 +69,9 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] +func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -63,18 +79,20 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. 
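The new resolver/dns shim above exists only for backward compatibility; per its deprecation note, programs should fetch the registered builder instead of importing the package. A sketch, assuming that importing the grpc package registers the default "dns" resolver as a side effect:

package main

import (
	"fmt"

	_ "google.golang.org/grpc" // imported for side effects: registers default resolvers
	"google.golang.org/grpc/resolver"
)

func main() {
	b := resolver.Get("dns")
	fmt.Println(b != nil && b.Scheme() == "dns") // true
}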
-func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] +func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,127 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered sets of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe; it must not be accessed concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the endpoint in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value for the endpoint in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, the endpoint information returned is +// not the full endpoint data (duplicated addresses and attributes are dropped), +// but it can be used for EndpointMap accesses.
+func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/event-exporter/vendor/google.golang.org/grpc/resolver/resolver.go b/event-exporter/vendor/google.golang.org/grpc/resolver/resolver.go index e28b68026..adf89dd9c 100644 --- a/event-exporter/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/event-exporter/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,8 +22,10 @@ package resolver import ( "context" + "fmt" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -39,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -74,28 +77,9 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -108,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. 
It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -121,29 +102,46 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -172,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. 
+ // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -196,6 +220,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -207,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -227,23 +255,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -251,6 +263,29 @@ type Target struct { URL url.URL } +// Endpoint retrieves the endpoint, without its leading "/", from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint", the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here.
But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can work around this in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -258,8 +293,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } @@ -278,9 +315,12 @@ type Resolver interface { Close() } -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically inline, and must keep it unchanged. + OverrideAuthority(Target) string } diff --git a/event-exporter/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/event-exporter/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index 2c47cd54f..000000000 --- a/event-exporter/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State - - incomingMu sync.Mutex // Synchronizes all the incoming calls.
-} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { - ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) - if err != nil { - return nil, err - } - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) - } - ccr.resolverMu.Unlock() -} - -func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return nil - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil -} - -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) -} - -// NewAddress is called by the resolver implementation to send addresses to gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) -} - -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) -} diff --git a/event-exporter/vendor/google.golang.org/grpc/resolver_wrapper.go b/event-exporter/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..c79bab121 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine that the wrapper +// has finished shutting down, callers should block on ccr.serializer.Done() +// without holding cc.mu. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to +// gRPC, which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/event-exporter/vendor/google.golang.org/grpc/rpc_util.go b/event-exporter/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004..a4b6bc687 100644 --- a/event-exporter/vendor/google.golang.org/grpc/rpc_util.go +++ b/event-exporter/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -76,8 +75,8 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -160,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
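For context on how the CallOptions documented in this hunk are consumed: grpc.Header fills the supplied metadata.MD pointer after the RPC completes, and grpc.Trailer (next hunk) behaves the same way. A minimal, hypothetical sketch follows; pb.GreeterClient, pb.HelloRequest, and SayHello stand in for any generated gRPC client API.

// collectMetadata demonstrates collecting response header and trailer
// metadata with the Header and Trailer CallOptions. Both MD values are
// populated only *after* the RPC completes.
func collectMetadata(ctx context.Context, c pb.GreeterClient) error {
	var header, trailer metadata.MD // "google.golang.org/grpc/metadata"
	_, err := c.SayHello(ctx, &pb.HelloRequest{Name: "demo"},
		grpc.Header(&header), grpc.Trailer(&trailer))
	if err != nil {
		return err
	}
	log.Printf("header: %v trailer: %v", header, trailer)
	return nil
}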
@@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. It is mainly intended for use by streaming interceptors, which can +// use it to be notified when the RPC completes, along with information about +// the status of the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is a CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -305,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -328,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send.
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +416,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -540,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -548,10 +588,11 @@ type parser struct { // format. The caller owns the returned msg memory. 
// // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -572,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -587,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -601,14 +640,18 @@ func encode(c baseCodec, msg interface{}) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -654,14 +697,15 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -682,17 +726,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. 
uncompressedBytes []byte } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -704,13 +748,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -718,7 +762,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -745,23 +789,25 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -821,19 +867,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. 
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/event-exporter/vendor/google.golang.org/grpc/server.go b/event-exporter/vendor/google.golang.org/grpc/server.go index eadf9e05f..e89c5ac61 100644 --- a/event-exporter/vendor/google.golang.org/grpc/server.go +++ b/event-exporter/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -70,15 +70,25 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -91,26 +101,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream + mdata any } // Server is a gRPC server to serve RPC requests. 
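As a point of reference for the ServiceDesc and methodHandler changes above, generated code registers a service by passing its ServiceDesc to RegisterService via a generated helper. A hypothetical sketch (the Greeter service, the greeterServer implementation, and pb.RegisterGreeterServer are illustrative stand-ins, not part of this patch):

// run wires a service implementation into a Server and serves it.
// pb.RegisterGreeterServer calls s.RegisterService with the generated
// ServiceDesc, whose HandlerType and Metadata fields are now typed as any.
func run() error {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	pb.RegisterGreeterServer(s, &greeterServer{})
	return s.Serve(lis) // blocks until Stop or GracefulStop is called
}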
@@ -132,12 +136,14 @@ type Server struct { quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -149,8 +155,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -161,19 +168,25 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +196,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,10 +220,41 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. 
The default value for this buffer is + 32KB. Zero or negative values will disable the write buffer such that each + write will be on the underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -218,11 +262,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of the read buffer; this determines how +// much data can be read at most in one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable the read buffer for a +// connection so the data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -247,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -298,7 +341,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -361,6 +404,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -419,7 +465,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +481,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server.
+func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +522,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +543,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +558,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -512,6 +572,44 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers causes Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -520,39 +618,31 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]).
// // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. - threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + f() } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) - } -} - -func (s *Server) stopServerWorkers() { + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) + go s.serverWorker() } } @@ -560,6 +650,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,15 +677,14 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -600,7 +692,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -615,14 +707,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. 
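The serverWorker refactor above replaces per-worker channels of *serverWorkerData with a single shared channel of func(). The same pattern in a self-contained sketch, including the stack-reset respawn after a fixed number of tasks:

package main

import "fmt"

const resetThreshold = 1 << 16

type pool struct {
	tasks chan func()
}

// worker drains the task channel; after resetThreshold tasks it respawns
// itself so a transiently grown stack does not live in memory forever (the
// runtime.morestack issue referenced above).
func (p *pool) worker() {
	for done := 0; done < resetThreshold; done++ {
		f, ok := <-p.tasks
		if !ok {
			return
		}
		f()
	}
	go p.worker()
}

func main() {
	p := &pool{tasks: make(chan func())}
	for i := 0; i < 4; i++ {
		go p.worker()
	}
	done := make(chan struct{})
	p.tasks <- func() { fmt.Println("ran on a pooled goroutine"); close(done) }
	<-done
	close(p.tasks) // workers see ok == false and exit
}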
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -633,7 +725,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -674,7 +766,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -712,7 +804,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +816,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -736,6 +827,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. 
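The expanded Serve doc comment above describes how to restore OS-default TCP keepalives. A sketch that follows its two steps literally; the address is arbitrary, and net.TCPConn.SetKeepAlive is used to set SO_KEEPALIVE on each accepted connection:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

// keepaliveListener enables SO_KEEPALIVE (with OS-default timing) on each
// accepted connection, as the Serve comment suggests.
type keepaliveListener struct{ net.Listener }

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		if err := tc.SetKeepAlive(true); err != nil {
			c.Close()
			return nil, err
		}
	}
	return c, nil
}

func main() {
	// A negative KeepAlive stops the Go runtime from overriding OS defaults,
	// but also stops it from enabling keepalives; the wrapper re-enables them.
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(grpc.NewServer().Serve(keepaliveListener{lis}))
}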
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -759,11 +862,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +871,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -840,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain() - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -866,13 +969,14 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -887,7 +991,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
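Serve retains its tempDelay backoff around lis.Accept. A standalone sketch of that retry loop; the 5ms-to-1s doubling mirrors the gRPC source, but treat the exact constants here as illustrative:

package main

import (
	"log"
	"net"
	"time"
)

// acceptLoop retries transient Accept errors with exponential backoff and
// resets the delay after any successful Accept, like Serve above.
func acceptLoop(lis net.Listener, handle func(net.Conn)) error {
	var tempDelay time.Duration
	for {
		conn, err := lis.Accept()
		if err != nil {
			if ne, ok := err.(interface{ Temporary() bool }); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if tempDelay > time.Second {
					tempDelay = time.Second
				}
				log.Printf("accept error %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		tempDelay = 0
		go handle(conn)
	}
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(acceptLoop(lis, func(c net.Conn) { c.Close() }))
}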
if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -897,38 +1001,44 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } - var roundRobinCounter uint32 - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- f: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - } - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) + go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -945,70 +1055,47 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. 
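With statsHandler now a statsHandlers slice, several handlers can be registered and each receives the TagConn/HandleConn and per-RPC callbacks shown in serveStreams above. A minimal handler for illustration; the log output is arbitrary:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// logHandler is a bare-bones stats.Handler.
type logHandler struct{}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("conn from %v", info.RemoteAddr)
	return ctx
}
func (logHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}
func (logHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}
func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InPayload); ok {
		log.Printf("received %d bytes (compressed %d)", in.Length, in.CompressedLength)
	}
}

func main() {
	// Each StatsHandler option appends; both handlers are invoked in turn.
	_ = grpc.NewServer(grpc.StatsHandler(logHandler{}), grpc.StatsHandler(logHandler{}))
}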
return } if !s.addConn(listenerAddressForServeHTTP, st) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1058,7 +1145,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1075,8 +1162,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1103,40 +1192,35 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
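getChainUnaryHandler above builds the interceptor chain recursively, replacing the old mutable state struct. The same scheme in miniature, outside gRPC, to make the call order visible:

package main

import "fmt"

type handler func(req string) string
type interceptor func(req string, next handler) string

// chain mirrors getChainUnaryHandler: each frame closes over the next
// interceptor until the final handler terminates the recursion.
func chain(ics []interceptor, curr int, final handler) handler {
	if curr == len(ics)-1 {
		return final
	}
	return func(req string) string {
		return ics[curr+1](req, chain(ics, curr+1, final))
	}
}

func main() {
	ics := []interceptor{
		func(req string, next handler) string { return "a(" + next(req) + ")" },
		func(req string, next handler) string { return "b(" + next(req) + ")" },
	}
	final := func(req string) string { return req }
	fmt.Println(ics[0]("x", chain(ics, 0, final))) // prints a(b(x))
}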
- var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1154,13 +1238,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1168,7 +1252,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1180,10 +1264,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { - ctx := stream.Context() + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1202,7 +1292,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } } // comp and cp are used for compression. 
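The Begin/End stats bookkeeping above can be approximated at the application layer with an ordinary unary interceptor; a rough sketch, with an arbitrary log format:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)

// timing records one timestamp before the handler and one after, much like
// the stats.Begin/stats.End pair in processUnaryRPC.
func timing(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	begin := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v status=%v", info.FullMethod, time.Since(begin), status.Code(err))
	return resp, err
}

func main() {
	_ = grpc.NewServer(grpc.ChainUnaryInterceptor(timing))
}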
decomp and dc are used for @@ -1212,6 +1304,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1232,60 +1325,71 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { - sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + for _, sh := range shs { + sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
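The negotiation above can only echo the client's encoding if a compressor is registered under that name. Enabling the stock gzip implementation, which registers itself in an init function, looks like this:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor in init()
)

func main() {
	// Once registered, encoding.GetCompressor finds it when a client sends
	// grpc-encoding: gzip, and the server replies with the same encoding.
	if encoding.GetCompressor("gzip") == nil {
		panic("gzip compressor not registered")
	}
	_ = grpc.NewServer()
}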
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1294,18 +1398,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } } return appErr } @@ -1314,7 +1424,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1331,26 +1446,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1361,14 +1484,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
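Both RPC paths now pass non-status handler errors through status.FromContextError, so a handler that returns ctx.Err() yields a meaningful code instead of Unknown. For illustration:

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	for _, err := range []error{
		context.DeadlineExceeded, // becomes codes.DeadlineExceeded
		context.Canceled,         // becomes codes.Canceled
		errors.New("boom"),       // anything else becomes codes.Unknown
	} {
		fmt.Println(status.FromContextError(err).Code(), err)
	}
}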
@@ -1393,60 +1518,57 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(ctx, statsBegin) + } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
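The streaming path chains interceptors with the same recursive scheme. A minimal StreamServerInterceptor and its registration; the log line is arbitrary:

package main

import (
	"log"

	"google.golang.org/grpc"
)

// logStreams runs before every streaming handler on the server.
func logStreams(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	log.Printf("stream %s client=%v server=%v",
		info.FullMethod, info.IsClientStream, info.IsServerStream)
	return handler(srv, ss)
}

func main() {
	_ = grpc.NewServer(grpc.ChainStreamInterceptor(logStreams))
}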
defer func() { if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1454,7 +1576,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1462,7 +1584,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(ctx, end) + } } if channelz.IsOn() { @@ -1475,8 +1599,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1495,7 +1626,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(ctx, logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1517,12 +1650,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1532,7 +1671,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1549,7 +1688,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1558,13 +1699,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1573,57 +1717,93 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } } - return err + return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. 
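handleStream's method parsing, extracted here as a standalone helper for illustration; the same last-slash split reappears in isRegisteredMethod further down:

package main

import (
	"fmt"
	"strings"
)

// splitMethodName splits a full method like "/pkg.Service/Method" (leading
// slash optional) at the last '/', as handleStream above does.
func splitMethodName(fullMethod string) (service, method string, ok bool) {
	if fullMethod != "" && fullMethod[0] == '/' {
		fullMethod = fullMethod[1:]
	}
	pos := strings.LastIndex(fullMethod, "/")
	if pos == -1 {
		return "", "", false // malformed: reported as Unimplemented above
	}
	return fullMethod[:pos], fullMethod[pos+1:], true
}

func main() {
	fmt.Println(splitMethodName("/grpc.health.v1.Health/Check"))
}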
+ stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1632,19 +1812,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1654,7 +1834,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1669,7 +1849,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1684,7 +1864,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1699,94 +1879,88 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
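When the service or method lookup above fails, handleStream falls back to opts.unknownStreamDesc, which UnknownServiceHandler installs. A catch-all sketch:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	srv := grpc.NewServer(grpc.UnknownServiceHandler(func(_ any, stream grpc.ServerStream) error {
		method, _ := grpc.MethodFromServerStream(stream)
		log.Printf("unhandled method %q", method)
		return status.Error(codes.Unimplemented, "method not implemented here")
	}))
	_ = srv
}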
s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close() - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) } - }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1800,17 +1974,70 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. 
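With Stop and GracefulStop now sharing stop(graceful bool), a common deployment pattern is to try the graceful path and fall back to a hard Stop on a deadline. A sketch with arbitrary signal and timeout choices:

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"time"

	"google.golang.org/grpc"
)

func main() {
	srv := grpc.NewServer()
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt)
		<-sig
		// GracefulStop drains connections and waits for handlers (see stop
		// above); give it a bounded window, then force the issue.
		done := make(chan struct{})
		go func() { srv.GracefulStop(); close(done) }()
		select {
		case <-done:
		case <-time.After(10 * time.Second):
			srv.Stop()
		}
	}()
	log.Println(srv.Serve(lis))
}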
+func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1822,8 +2049,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1835,8 +2068,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. 
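A handler-side sketch of the header rules spelled out above; the metadata keys are made up for illustration:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// echo shows the lifecycle: SetHeader calls merge until headers are flushed
// by SendHeader, the first response message, or the final status; calling
// SetHeader after that point returns an error.
func echo(ctx context.Context, req any) (any, error) {
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-stage", "start")); err != nil {
		return nil, err
	}
	// Optional early flush; allowed at most once per RPC.
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-stage", "flushed")); err != nil {
		return nil, err
	}
	// Trailers can still be merged until the RPC finishes.
	_ = grpc.SetTrailer(ctx, metadata.Pairs("x-done", "true"))
	return req, nil
}

func main() { _ = echo }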
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1865,3 +2156,53 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. 
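A handler-side sketch of the two experimental APIs above: inspect what the client advertised, then force gzip for the response when it is supported. Both calls may change or vanish in later releases:

package main

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // register gzip so validation can find it
)

func pickCompression(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, name := range names {
		if name == "gzip" {
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil // fall back to the default: mirror the request encoding
}

func main() { _ = pickCompression }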
+ q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/event-exporter/vendor/google.golang.org/grpc/service_config.go b/event-exporter/vendor/google.golang.org/grpc/service_config.go index 22c4240cf..0df11fc09 100644 --- a/event-exporter/vendor/google.golang.org/grpc/service_config.go +++ b/event-exporter/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -57,10 +55,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If @@ -107,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -130,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
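atomicSemaphore above is small enough to restate outside gRPC. A self-contained version with a demo; as the N.B. notes, the <= 0 check tolerates concurrent acquires, though gRPC itself only calls acquire synchronously:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// semaphore reimplements the pattern above: a counter plus a 1-buffered
// channel that hands a blocked acquirer its quota.
type semaphore struct {
	n    atomic.Int64
	wait chan struct{}
}

func newSemaphore(n int64) *semaphore {
	s := &semaphore{wait: make(chan struct{}, 1)}
	s.n.Store(n)
	return s
}

func (s *semaphore) acquire() {
	if s.n.Add(-1) < 0 {
		<-s.wait // out of quota: block until a release
	}
}

func (s *semaphore) release() {
	if s.n.Add(1) <= 0 {
		s.wait <- struct{}{} // unblock one waiting acquire
	}
}

func main() {
	sem := newSemaphore(1)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.acquire()
			defer sem.release()
			fmt.Println("worker", i, "holds the quota")
		}(i)
	}
	wg.Wait()
}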
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -202,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -218,7 +171,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -227,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -253,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -284,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -313,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff 
<= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -333,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } @@ -381,6 +321,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/event-exporter/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/event-exporter/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f9266..35e7a20a0 100644 --- a/event-exporter/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/event-exporter/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/event-exporter/vendor/google.golang.org/grpc/shared_buffer_pool.go b/event-exporter/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 000000000..48a64cfe8 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
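With parseDuration gone, durations in the service config hunks above are decoded by internalserviceconfig.Duration but keep the same proto JSON wire form: decimal seconds with an "s" suffix. A client-side sketch; echo.Echo is a hypothetical service name and the address is arbitrary:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	const js = `{
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo"}],
	    "waitForReady": true,
	    "timeout": "0.5s",
	    "retryPolicy": {
	      "maxAttempts": 3,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`
	// Any zero or negative backoff, maxAttempts <= 1, or empty code list makes
	// convertRetryPolicy above discard the retry policy with a warning.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(js))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}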
+func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/event-exporter/vendor/google.golang.org/grpc/stats/stats.go b/event-exporter/vendor/google.golang.org/grpc/stats/stats.go index 0285dcc6a..4ab70e2d4 100644 --- a/event-exporter/vendor/google.golang.org/grpc/stats/stats.go +++ b/event-exporter/vendor/google.golang.org/grpc/stats/stats.go @@ -59,18 +59,36 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). 
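Any type with the Get/Put shape satisfies SharedBufferPool, not just the bucketed pool above. A deliberately tiny single-bucket alternative, wired in through the (deprecated) RecvBufferPool option:

package main

import (
	"sync"

	"google.golang.org/grpc"
)

// onePool backs SharedBufferPool with a single sync.Pool instead of the five
// size buckets used by the built-in implementation.
type onePool struct{ p sync.Pool }

func (o *onePool) Get(length int) []byte {
	if bs, ok := o.p.Get().(*[]byte); ok && cap(*bs) >= length {
		return (*bs)[:length] // reuse: reslice up to the requested length
	}
	return make([]byte, length) // pool miss or too small: allocate fresh
}

func (o *onePool) Put(bs *[]byte) { o.p.Put(bs) }

func main() {
	_ = grpc.NewServer(grpc.RecvBufferPool(&onePool{}))
}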
Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -126,12 +144,18 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/event-exporter/vendor/google.golang.org/grpc/status/status.go b/event-exporter/vendor/google.golang.org/grpc/status/status.go index 6d163b6e3..a93360efb 100644 --- a/event-exporter/vendor/google.golang.org/grpc/status/status.go +++ b/event-exporter/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -76,22 +76,52 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. 
In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -103,19 +133,16 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } // FromContextError converts a context error or wrapped context error into a diff --git a/event-exporter/vendor/google.golang.org/grpc/stream.go b/event-exporter/vendor/google.golang.org/grpc/stream.go index 625d47b34..d621f52b1 100644 --- a/event-exporter/vendor/google.golang.org/grpc/stream.go +++ b/event-exporter/vendor/google.golang.org/grpc/stream.go @@ -31,13 +31,16 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -45,12 +48,16 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +// execution of a streaming RPC. 
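// [Editorial sketch, not upstream code] The practical effect of the FromError
// rules above on a wrapped status error:
//
//	base := status.Error(codes.NotFound, "missing")
//	wrapped := fmt.Errorf("lookup failed: %w", base)
//	s, ok := status.FromError(wrapped)
//	// ok == true, s.Code() == codes.NotFound, and s.Message() is the full
//	// wrapped.Error() text, not just "missing".
//	_ = status.Code(wrapped) // codes.NotFound, via Convert(err).Code()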
+// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -75,9 +82,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -86,7 +93,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -119,7 +128,10 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -128,7 +140,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -137,13 +149,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. 
Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -164,6 +176,30 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -187,6 +223,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -293,20 +336,35 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. 
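// [Editorial sketch, not upstream code] With the validation added above,
// malformed outgoing metadata now fails the RPC up front with codes.Internal
// instead of surfacing as an obscure transport error (conn, req, and reply
// are assumed placeholders):
//
//	ctx := metadata.AppendToOutgoingContext(context.Background(),
//		"bad~key", "v") // '~' is outside the allowed key charset
//	err := conn.Invoke(ctx, "/pkg.Svc/Method", req, reply)
//	// status.Code(err) == codes.Internal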
+ cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -320,7 +378,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } if desc != unaryStreamDesc { @@ -341,14 +401,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -377,58 +443,81 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. 
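// [Editorial sketch, not upstream code] metadata.Join, used just below to
// merge picker-provided metadata, concatenates values per key rather than
// overwriting:
//
//	md := metadata.Join(metadata.Pairs("k", "v1"), metadata.Pairs("k", "v2"))
//	// md["k"] == []string{"v1", "v2"}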
+ // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it is local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function in the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -454,7 +543,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -486,12 +575,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -504,8 +593,13 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +619,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc.
- if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +645,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +669,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +717,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +744,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -679,6 +761,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. 
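// [Editorial sketch, not upstream code] The retry machinery above is driven
// by a per-method service config; one way to enable it on a client (the
// service name and target are placeholders):
//
//	const sc = `{
//	  "methodConfig": [{
//	    "name": [{"service": "pkg.Svc"}],
//	    "retryPolicy": {
//	      "maxAttempts": 4,
//	      "initialBackoff": "0.1s",
//	      "maxBackoff": "1s",
//	      "backoffMultiplier": 2,
//	      "retryableStatusCodes": ["UNAVAILABLE"]
//	    }
//	  }]
//	}`
//	conn, err := grpc.Dial(target, grpc.WithDefaultServiceConfig(sc),
//		grpc.WithTransportCredentials(insecure.NewCredentials()))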
return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -695,7 +789,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -709,12 +803,21 @@ func (cs *clientStream) Header() (metadata.MD, error) { m, err = a.s.Header() return toRPCErr(err) }, cs.commitAttemptLocked) + + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -723,10 +826,13 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } - return m, err + + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +850,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -767,7 +872,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -795,61 +900,46 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } } - return + return err } -func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if cs.binlog != nil { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - } } return err } @@ -869,10 +959,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } } // We never returned an error here for reasons. return nil @@ -889,6 +982,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -899,16 +995,31 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ - OnClientSide: true, - }) + // Only one of cancel or trailer needs to be logged. 
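// [Editorial sketch, not upstream code] Given the Header change earlier in
// this file (errors are no longer returned from Header itself), client code
// should treat a nil/nil result as "stream ended without headers" and read
// the final status via RecvMsg (stream and reply are placeholders):
//
//	md, err := stream.Header() // err is nil even if the RPC already failed
//	if md == nil && err == nil {
//		err = stream.RecvMsg(reply) // surfaces the RPC's final status
//	}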
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -923,7 +1034,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -941,8 +1052,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -950,9 +1061,9 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -980,6 +1091,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -989,15 +1101,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1036,12 +1149,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1049,7 +1162,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1057,7 +1170,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1163,17 +1276,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1236,7 +1354,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1281,7 +1399,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1362,8 +1480,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. 
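// [Editorial sketch, not upstream code] Why handlers should not infer the
// client-visible status from ServerStream errors: an error from Send merely
// means the stream is broken, and the client may instead observe, e.g.,
// codes.Canceled or codes.Unavailable (Req, Resp, and Svc_StreamServer are
// placeholder generated types):
//
//	func (s *svc) Stream(req *Req, stream Svc_StreamServer) error {
//		if err := stream.Send(&Resp{}); err != nil {
//			return err // status-compatible, but do not branch on its code
//		}
//		return status.Error(codes.FailedPrecondition, "cannot continue")
//	}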
type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When called multiple times, all the provided metadata will be merged. @@ -1395,7 +1515,10 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1404,7 +1527,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. @@ -1420,13 +1543,15 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1446,17 +1571,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } return err } @@ -1465,10 +1602,13 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1476,7 +1616,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1497,6 +1637,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // The server handler could have set a new compressor by calling SetSendCompressor. + // If it is set, we need to use it for compressing the outbound message.
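// [Editorial sketch, not upstream code] A handler opting in to a per-RPC
// response compressor, which the check below then honors; assumes the
// experimental grpc.SetSendCompressor API in grpc-go v1.61 and the
// "google.golang.org/grpc/encoding/gzip" codec (Req and Resp are placeholders):
//
//	func (s *svc) Method(ctx context.Context, req *Req) (*Resp, error) {
//		if err := grpc.SetSendCompressor(ctx, gzip.Name); err != nil {
//			return nil, status.Error(codes.Internal, err.Error())
//		}
//		return &Resp{}, nil
//	}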
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1510,25 +1657,33 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1536,7 +1691,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1557,13 +1712,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } } return err } @@ -1572,20 +1730,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } } return nil } @@ -1599,7 +1763,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/event-exporter/vendor/google.golang.org/grpc/tap/tap.go b/event-exporter/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb..07f012576 100644 --- a/event-exporter/vendor/google.golang.org/grpc/tap/tap.go +++ b/event-exporter/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/event-exporter/vendor/google.golang.org/grpc/trace.go b/event-exporter/vendor/google.golang.org/grpc/trace.go index 07a2d26b3..9ded79321 100644 --- a/event-exporter/vendor/google.golang.org/grpc/trace.go +++ b/event-exporter/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/event-exporter/vendor/google.golang.org/grpc/version.go b/event-exporter/vendor/google.golang.org/grpc/version.go index 9d3fd73da..f1aec4c0a 100644 --- a/event-exporter/vendor/google.golang.org/grpc/version.go +++ b/event-exporter/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
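// [Editorial sketch, not upstream code] Using the new tap.Info.Header field
// (added above) to reject RPCs before any message-level work; the header name
// is a placeholder, and imports of grpc, tap, codes, and status are assumed:
//
//	func apiKeyTap(ctx context.Context, info *tap.Info) (context.Context, error) {
//		if len(info.Header.Get("x-api-key")) == 0 {
//			return nil, status.Error(codes.Unauthenticated, "missing api key")
//		}
//		return ctx, nil
//	}
//	srv := grpc.NewServer(grpc.InTapHandle(apiKeyTap))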
-const Version = "1.44.1-dev" +const Version = "1.61.1" diff --git a/event-exporter/vendor/google.golang.org/grpc/vet.sh b/event-exporter/vendor/google.golang.org/grpc/vet.sh index d923187a7..5da38a409 100644 --- a/event-exporter/vendor/google.golang.org/grpc/vet.sh +++ b/event-exporter/vendor/google.golang.org/grpc/vet.sh @@ -35,22 +35,13 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -66,40 +57,54 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. 
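# [Editorial illustration, not part of vet.sh] The interface{} check above is
# purely stylistic: since Go 1.18, `any` is a predeclared alias, so the two
# declarations below are identical to the compiler; the check only enforces
# the shorter spelling.
#	var v any = 42          // preferred in this codebase
#	var w interface{} = 42  // flagged by the check above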
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -107,105 +112,79 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" - go mod tidy + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithBalancerName -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. 
+grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. - if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/event-exporter/vendor/google.golang.org/protobuf/AUTHORS b/event-exporter/vendor/google.golang.org/protobuf/AUTHORS deleted file mode 100644 index 2b00ddba0..000000000 --- a/event-exporter/vendor/google.golang.org/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/event-exporter/vendor/google.golang.org/protobuf/CONTRIBUTORS b/event-exporter/vendor/google.golang.org/protobuf/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976..000000000 --- a/event-exporter/vendor/google.golang.org/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. 
-# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 07da5db34..f47902371 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -19,11 +20,11 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -113,7 +121,11 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals a message into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -159,10 +171,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } // Get the FieldDescriptor. - var fd pref.FieldDescriptor + var fd protoreflect.FieldDescriptor if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { // Only extension names are in [name] format. 
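// [Editorial sketch, not upstream code] The new UnmarshalOptions knobs
// introduced above, as an application would set them (jsonBytes and msg are
// placeholders):
//
//	opts := protojson.UnmarshalOptions{
//		DiscardUnknown: true, // now also skips unknown enum name values
//		RecursionLimit: 512,  // 0 means protowire.DefaultRecursionLimit
//	}
//	err := opts.Unmarshal(jsonBytes, msg)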
- extName := pref.FullName(name[1 : len(name)-1]) + extName := protoreflect.FullName(name[1 : len(name)-1]) extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) @@ -240,23 +252,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } } -func isKnownValue(fd pref.FieldDescriptor) bool { +func isKnownValue(fd protoreflect.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname } -func isNullValue(fd pref.FieldDescriptor) bool { +func isNullValue(fd protoreflect.FieldDescriptor) bool { ed := fd.Enum() return ed != nil && ed.FullName() == genid.NullValue_enum_fullname } // unmarshalSingular unmarshals to the non-repeated field specified // by the given FieldDescriptor. -func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { - var val pref.Value +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), false) default: @@ -266,70 +278,72 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } // unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by // the given FieldDescriptor. -func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { const b32 int = 32 const b64 int = 64 tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if tok.Kind() == json.Bool { - return pref.ValueOfBool(tok.Bool()), nil + return protoreflect.ValueOfBool(tok.Bool()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, ok := unmarshalInt(tok, b32); ok { return v, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, ok := unmarshalInt(tok, b64); ok { return v, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, ok := unmarshalUint(tok, b32); ok { return v, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, ok := unmarshalUint(tok, b64); ok { return v, nil } - case pref.FloatKind: + case protoreflect.FloatKind: if v, ok := unmarshalFloat(tok, b32); ok { return v, nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if v, ok := unmarshalFloat(tok, b64); ok { return v, nil } - case pref.StringKind: + case protoreflect.StringKind: if tok.Kind() == json.String { - return pref.ValueOfString(tok.ParsedString()), nil + return protoreflect.ValueOfString(tok.ParsedString()), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if v, ok := unmarshalBytes(tok); ok { return v, nil } - case pref.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, 
fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -337,10 +351,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } -func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getInt(tok, bitSize) @@ -349,30 +363,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getInt(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getInt(tok json.Token, bitSize int) (pref.Value, bool) { +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Int(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfInt32(int32(n)), true + return protoreflect.ValueOfInt32(int32(n)), true } - return pref.ValueOfInt64(n), true + return protoreflect.ValueOfInt64(n), true } -func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getUint(tok, bitSize) @@ -381,30 +395,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. 
s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getUint(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getUint(tok json.Token, bitSize int) (pref.Value, bool) { +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Uint(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfUint32(uint32(n)), true + return protoreflect.ValueOfUint32(uint32(n)), true } - return pref.ValueOfUint64(n), true + return protoreflect.ValueOfUint64(n), true } -func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getFloat(tok, bitSize) @@ -414,49 +428,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { switch s { case "NaN": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.NaN())), true + return protoreflect.ValueOfFloat32(float32(math.NaN())), true } - return pref.ValueOfFloat64(math.NaN()), true + return protoreflect.ValueOfFloat64(math.NaN()), true case "Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(+1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true } - return pref.ValueOfFloat64(math.Inf(+1)), true + return protoreflect.ValueOfFloat64(math.Inf(+1)), true case "-Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(-1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true } - return pref.ValueOfFloat64(math.Inf(-1)), true + return protoreflect.ValueOfFloat64(math.Inf(-1)), true } // Decode number from string. 
if len(s) != len(strings.TrimSpace(s)) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getFloat(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Float(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfFloat32(float32(n)), true + return protoreflect.ValueOfFloat32(float32(n)), true } - return pref.ValueOfFloat64(n), true + return protoreflect.ValueOfFloat64(n), true } -func unmarshalBytes(tok json.Token) (pref.Value, bool) { +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { if tok.Kind() != json.String { - return pref.Value{}, false + return protoreflect.Value{}, false } s := tok.ParsedString() @@ -469,36 +483,39 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) { } b, err := enc.DecodeString(s) if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } - return pref.ValueOfBytes(b), true + return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. s := tok.ParsedString() - if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), true + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true } case json.Number: if n, ok := tok.Int(32); ok { - return pref.ValueOfEnum(pref.EnumNumber(n)), true + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true } case json.Null: // This is only valid for google.protobuf.NullValue. if isNullValue(fd) { - return pref.ValueOfEnum(0), true + return protoreflect.ValueOfEnum(0), true } } - return pref.Value{}, false + return protoreflect.Value{}, false } -func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -508,7 +525,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: for { tok, err := d.Peek() if err != nil { @@ -542,14 +559,16 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } return nil } -func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -561,18 +580,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside the for loop // below. 
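Note: beyond the mechanical pref-to-protoreflect rename, two behavior changes land in this hunk: unmarshalEnum now honors DiscardUnknown for unrecognized enum value names, and the new val.IsValid() guards leave the field unset rather than calling Set with an invalid value. A sketch of what callers see; google.protobuf.Type is used only because it is a readily available message with an enum field:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/typepb"
    )

    func main() {
        // "SYNTAX_PROTO4" is not a declared name in google.protobuf.Syntax.
        // Previously this failed even with DiscardUnknown set; now the unknown
        // name is skipped and the field is left at its zero value.
        in := []byte(`{"name": "T", "syntax": "SYNTAX_PROTO4"}`)

        var t typepb.Type
        err := protojson.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(in, &t)
        fmt.Println(err, t.GetSyntax()) // <nil> SYNTAX_PROTO2
    }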
- var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { val := mmap.NewValue() if err := d.unmarshalMessage(val.Message(), false); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return val, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil @@ -618,7 +638,7 @@ Loop: // unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. // A map key type is any integral or string type. -func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { +func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { const b32 = 32 const b64 = 64 const base10 = 10 @@ -626,40 +646,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref. name := tok.Name() kind := fd.Kind() switch kind { - case pref.StringKind: - return pref.ValueOfString(name).MapKey(), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil - case pref.BoolKind: + case protoreflect.BoolKind: switch name { case "true": - return pref.ValueOfBool(true).MapKey(), nil + return protoreflect.ValueOfBool(true).MapKey(), nil case "false": - return pref.ValueOfBool(false).MapKey(), nil + return protoreflect.ValueOfBool(false).MapKey(), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, err := strconv.ParseInt(name, base10, b32); err == nil { - return pref.ValueOfInt32(int32(n)).MapKey(), nil + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, err := strconv.ParseInt(name, base10, b64); err == nil { - return pref.ValueOfInt64(int64(n)).MapKey(), nil + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, err := strconv.ParseUint(name, base10, b32); err == nil { - return pref.ValueOfUint32(uint32(n)).MapKey(), nil + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, err := strconv.ParseUint(name, base10, b64); err == nil { - return pref.ValueOfUint64(uint64(n)).MapKey(), nil + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil } default: panic(fmt.Sprintf("invalid kind for map key: %v", kind)) } - return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 00ea2fecf..ae71007c1 100644 --- 
a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -4,8 +4,8 @@ // Package protojson marshals and unmarshals protocol buffer messages as JSON // format. It follows the guide at -// https://developers.google.com/protocol-buffers/docs/proto3#json. +// https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. package protojson diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index ba971f078..3f75098b6 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -18,7 +18,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -32,7 +31,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. +// Marshal writes the given [proto.Message] in JSON format using default options. // Do not depend on the output being stable. It may change over time across // different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -82,6 +81,25 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════════════════╝ EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -103,17 +121,23 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program.
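Note: for contrast, a small sketch of the three marshaling modes on an all-zero message (google.protobuf.Type again serves as an arbitrary stand-in; exact field order and spacing of protojson output are intentionally unstable):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/typepb"
    )

    func main() {
        t := &typepb.Type{} // every field at its zero value

        b, _ := protojson.Marshal(t)
        fmt.Println(string(b)) // {} since defaults are omitted

        // EmitDefaultValues writes "", 0, [] and {} for non-presence fields,
        // but the unset source_context message stays omitted (no null).
        b, _ = protojson.MarshalOptions{EmitDefaultValues: true}.Marshal(t)
        fmt.Println(string(b))

        // EmitUnpopulated is the strict superset: sourceContext appears as null.
        b, _ = protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(t)
        fmt.Println(string(b))
    }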
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -121,7 +145,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -129,7 +153,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case the output in an empty JSON object. if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} @@ -164,8 +188,8 @@ type typeURLFieldRanger struct { typeURL string } -func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { return } m.FieldRanger.Range(f) @@ -173,9 +197,13 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. -type unpopulatedFieldRanger struct{ pref.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message -func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) @@ -184,10 +212,13 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { - v = pref.Value{} // use invalid value to emit null + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { return @@ -199,7 +230,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b // marshalMessage marshals the fields in the given protoreflect.Message. // If the typeURL is non-empty, then a synthetic "@type" field is injected // containing the URL as the value. 
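Note: MarshalAppend is why marshal now threads a byte slice through json.NewEncoder: output can be appended to a caller-owned buffer, letting tight encode loops reuse a single allocation. A sketch with arbitrary messages:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/typepb"
    )

    func main() {
        msgs := []*typepb.Type{{Name: "a"}, {Name: "b"}}

        buf := make([]byte, 0, 4096) // one backing array, reused per iteration
        for _, m := range msgs {
            var err error
            buf, err = protojson.MarshalOptions{}.MarshalAppend(buf[:0], m)
            if err != nil {
                panic(err)
            }
            fmt.Println(string(buf)) // e.g. {"name":"a"}, then {"name":"b"}
        }
    }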
-func (e encoder) marshalMessage(m pref.Message, typeURL string) error { +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } @@ -212,15 +243,18 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} } var err error - order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { name = fd.TextName() @@ -238,7 +272,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } // marshalValue marshals the given protoreflect.Value. -func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(val.List(), fd) @@ -251,44 +285,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. -func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { if !val.IsValid() { e.WriteNull() return nil } switch kind := fd.Kind(); kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: if e.WriteString(val.String()) != nil { return errors.InvalidUTF8(string(fd.FullName())) } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: e.WriteUint(val.Uint()) - case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, - pref.Sfixed64Kind, pref.Fixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: // 64-bit integers are written out as JSON string. e.WriteString(val.String()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. 
e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: if fd.Enum().FullName() == genid.NullValue_enum_fullname { e.WriteNull() } else { @@ -300,7 +334,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -312,7 +346,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List. -func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { e.StartArray() defer e.EndArray() @@ -326,12 +360,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { } // marshalMap marshals given protoreflect.Map. -func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { e.StartObject() defer e.EndObject() var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { if err = e.WriteName(k.String()); err != nil { return false } diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 72924a905..25329b769 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -17,14 +17,14 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -type marshalFunc func(encoder, pref.Message) error +type marshalFunc func(encoder, protoreflect.Message) error // wellKnownTypeMarshaler returns a marshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { return nil } -type unmarshalFunc func(decoder, pref.Message) error +type unmarshalFunc func(decoder, protoreflect.Message) error // wellKnownTypeUnmarshaler returns a unmarshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { // custom JSON representation, that representation will be embedded adding a // field `value` which holds the custom JSON in addition to the `@type` field. 
-func (e encoder) marshalAny(m pref.Message) error { +func (e encoder) marshalAny(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) @@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error { return nil } -func (d decoder) unmarshalAny(m pref.Message) error { +func (d decoder) unmarshalAny(m protoreflect.Message) error { // Peek to check for json.ObjectOpen to avoid advancing a read. start, err := d.Peek() if err != nil { @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m pref.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - m.Set(fdType, pref.ValueOfString(typeURL)) - m.Set(fdValue, pref.ValueOfBytes(b)) + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) return nil } @@ -308,53 +308,30 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } } + if open == 0 { + return nil + } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON // object's "value" field. -func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { // Skip ObjectOpen, and start reading the fields. d.Read() @@ -402,13 +379,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro // Wrapper types are encoded as JSON primitives like string, number or boolean. 
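Note: the rewritten skipJSONValue above drops recursion entirely: a single loop counts object/array opens and closes, returns once the count falls back to zero, and fails when the count exceeds RecursionLimit. The same flat pattern, sketched against the standard library's json.Decoder rather than the internal decoder used here:

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
        "strings"
    )

    // skipValue consumes exactly one JSON value by tracking how many
    // objects/arrays are currently open; a depth cap turns hostile nesting
    // into an error instead of unbounded stack growth.
    func skipValue(dec *json.Decoder, limit int) error {
        open := 0
        for {
            tok, err := dec.Token()
            if err != nil {
                return err
            }
            switch tok {
            case json.Delim('{'), json.Delim('['):
                open++
                if open > limit {
                    return errors.New("exceeded max recursion depth")
                }
            case json.Delim('}'), json.Delim(']'):
                open--
            }
            if open == 0 {
                return nil
            }
        }
    }

    func main() {
        dec := json.NewDecoder(strings.NewReader(`{"a":[1,{"b":2}]} "next"`))
        if err := skipValue(dec, 100); err != nil { // skips the whole object
            panic(err)
        }
        tok, _ := dec.Token()
        fmt.Println(tok) // next
    }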
-func (e encoder) marshalWrapperType(m pref.Message) error { +func (e encoder) marshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val := m.Get(fd) return e.marshalSingular(val, fd) } -func (d decoder) unmarshalWrapperType(m pref.Message) error { +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val, err := d.unmarshalScalar(fd) if err != nil { @@ -420,13 +397,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error { // The JSON representation for Empty is an empty JSON object. -func (e encoder) marshalEmpty(pref.Message) error { +func (e encoder) marshalEmpty(protoreflect.Message) error { e.StartObject() e.EndObject() return nil } -func (d decoder) unmarshalEmpty(pref.Message) error { +func (d decoder) unmarshalEmpty(protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -462,12 +439,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error { // The JSON representation for Struct is a JSON object that contains the encoded // Struct.fields map and follows the serialization rules for a map. -func (e encoder) marshalStruct(m pref.Message) error { +func (e encoder) marshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return e.marshalMap(m.Get(fd).Map(), fd) } -func (d decoder) unmarshalStruct(m pref.Message) error { +func (d decoder) unmarshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return d.unmarshalMap(m.Mutable(fd).Map(), fd) } @@ -476,12 +453,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error { // ListValue.values repeated field and follows the serialization rules for a // repeated field. -func (e encoder) marshalListValue(m pref.Message) error { +func (e encoder) marshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return e.marshalList(m.Get(fd).List(), fd) } -func (d decoder) unmarshalListValue(m pref.Message) error { +func (d decoder) unmarshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return d.unmarshalList(m.Mutable(fd).List(), fd) } @@ -490,7 +467,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error { // set. Each of the field in the oneof has its own custom serialization rule. A // Value message needs to be a oneof field set, else it is an error. 
-func (e encoder) marshalKnownValue(m pref.Message) error { +func (e encoder) marshalKnownValue(m protoreflect.Message) error { od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) fd := m.WhichOneof(od) if fd == nil { @@ -504,19 +481,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error { return e.marshalSingular(m.Get(fd), fd) } -func (d decoder) unmarshalKnownValue(m pref.Message) error { +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { tok, err := d.Peek() if err != nil { return err } - var fd pref.FieldDescriptor - var val pref.Value + var fd protoreflect.FieldDescriptor + var val protoreflect.Value switch tok.Kind() { case json.Null: d.Read() fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) - val = pref.ValueOfEnum(0) + val = protoreflect.ValueOfEnum(0) case json.Bool: tok, err := d.Read() @@ -524,7 +501,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) - val = pref.ValueOfBool(tok.Bool()) + val = protoreflect.ValueOfBool(tok.Bool()) case json.Number: tok, err := d.Read() @@ -550,7 +527,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) - val = pref.ValueOfString(tok.ParsedString()) + val = protoreflect.ValueOfString(tok.ParsedString()) case json.ObjectOpen: fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) @@ -591,7 +568,7 @@ const ( maxSecondsInDuration = 315576000000 ) -func (e encoder) marshalDuration(m pref.Message) error { +func (e encoder) marshalDuration(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) @@ -623,7 +600,7 @@ func (e encoder) marshalDuration(m pref.Message) error { return nil } -func (d decoder) unmarshalDuration(m pref.Message) error { +func (d decoder) unmarshalDuration(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -646,8 +623,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(nanos)) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) return nil } @@ -779,7 +756,7 @@ const ( minTimestampSeconds = -62135596800 ) -func (e encoder) marshalTimestamp(m pref.Message) error { +func (e encoder) marshalTimestamp(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) @@ -805,7 +782,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error { return nil } -func (d decoder) unmarshalTimestamp(m pref.Message) error { +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -814,23 +791,29 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { return d.unexpectedTokenError(tok) } - t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) if err != nil { return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) } - // Validate 
seconds. No need to validate nanos because time.Parse would have - // covered that already. + // Validate seconds. secs := t.Unix() if secs < minTimestampSeconds || secs > maxTimestampSeconds { return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) } + // Validate subseconds. + i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) return nil } @@ -839,14 +822,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { // lower-camel naming conventions. Encoding should fail if the path name would // end up differently after a round-trip. -func (e encoder) marshalFieldMask(m pref.Message) error { +func (e encoder) marshalFieldMask(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) list := m.Get(fd).List() paths := make([]string, 0, list.Len()) for i := 0; i < list.Len(); i++ { s := list.Get(i).String() - if !pref.FullName(s).IsValid() { + if !protoreflect.FullName(s).IsValid() { return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) } // Return error if conversion to camelCase is not reversible. @@ -861,7 +844,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error { return nil } -func (d decoder) unmarshalFieldMask(m pref.Message) error { +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -880,10 +863,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error { for _, s0 := range paths { s := strs.JSONSnakeCase(s0) - if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) } - list.Append(pref.ValueOfString(s)) + list.Append(protoreflect.ValueOfString(s)) } return nil } diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 179d6e8fc..a45f112bc 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -17,11 +17,11 @@ import ( "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). 
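Note: the added subsecond check exists because time.Parse with time.RFC3339Nano accepts an arbitrarily long fractional-seconds field and silently truncates it to nanosecond precision, so over-precise timestamps used to round-trip with data loss. Roughly, with timestamppb (the well-known Timestamp wrapper, assumed present in this tree):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        var ts timestamppb.Timestamp

        // Nine fractional digits (nanosecond precision): accepted as before.
        ok := []byte(`"2024-02-26T23:40:38.123456789Z"`)
        fmt.Println(protojson.Unmarshal(ok, &ts)) // <nil>

        // Ten fractional digits: time.Parse would quietly truncate this, but
        // the new length check on the subsecond field now rejects it.
        bad := []byte(`"2024-02-26T23:40:38.1234567891Z"`)
        fmt.Println(protojson.Unmarshal(bad, &ts)) // invalid Timestamp error
    }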
func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { } // Resolve the field descriptor. - var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType var xtErr error var isFieldNumberName bool switch tok.NameKind() { case text.IdentName: - name = pref.Name(tok.IdentName()) + name = protoreflect.Name(tok.IdentName()) fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) + num := protoreflect.FieldNumber(tok.FieldNumber()) if !num.IsValid() { return d.newError(tok.Pos(), "invalid field number: %d", num) } @@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch { case fd.IsList(): kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { default: kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. 
-func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), true) default: @@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the // given FieldDescriptor. -func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) + return protoreflect.Value{}, d.unexpectedTokenError(tok) } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil + return protoreflect.ValueOfBool(b), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil + return protoreflect.ValueOfInt32(n), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil + return protoreflect.ValueOfInt64(n), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil + return protoreflect.ValueOfUint32(n), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil + return protoreflect.ValueOfUint64(n), nil } - case pref.FloatKind: + case protoreflect.FloatKind: if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil + return protoreflect.ValueOfFloat32(n), nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil + return protoreflect.ValueOfFloat64(n), nil } - case pref.StringKind: + case protoreflect.StringKind: if s, ok := tok.String(); ok { if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") } - return pref.ValueOfString(s), nil + return protoreflect.ValueOfString(s), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil + return protoreflect.ValueOfBytes([]byte(b)), nil } - case pref.EnumKind: + case protoreflect.EnumKind: if lit, ok := tok.Enum(); ok { // Lookup EnumNumber based on name. 
- if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil } } if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil } default: panic(fmt.Sprintf("invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } // unmarshalList unmarshals into given protoreflect.List. A list value can // either be in [] syntax or simply just a single scalar/message value. -func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { +func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { tok, err := d.Peek() if err != nil { return err } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: switch tok.Kind() { case text.ListOpen: d.Read() @@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: <key>, value: <value>}. -func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { pval := mmap.NewValue() if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return pval, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: <key>, value: <value>}. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value Loop: for { // Read field name.
@@ -520,7 +520,7 @@ Loop: return d.unexpectedTokenError(tok) } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") @@ -535,7 +535,7 @@ Loop: key = val.MapKey() case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -561,7 +561,7 @@ Loop: } if !pval.IsValid() { switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: // If value field is not set for message/group types, construct an // empty one as default. pval = mmap.NewValue() @@ -575,7 +575,7 @@ Loop: // unmarshalAny unmarshals an Any textproto. It can either be in expanded form // or non-expanded form. -func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { var typeURL string var bValue []byte var seenTypeUrl bool @@ -619,7 +619,7 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.Any_TypeUrl_field_name: if seenTypeUrl { return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) @@ -686,10 +686,10 @@ Loop: fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) } return nil } @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 8d5304dc5..95967e811 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -20,7 +20,6 @@ import ( "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -34,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. 
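Note: the skipValue change above is a subtle bug fix: returning d.skipMessageValue() directly exited the skip loop after the first message in a list, desynchronizing the parser when an unknown field held a list of messages. With the loop continuing, input like the following should parse under DiscardUnknown (google.protobuf.Type is again only a stand-in target):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/typepb"
    )

    func main() {
        // "unknown_field" is not part of google.protobuf.Type, so its whole
        // list of message values must be skipped before parsing continues.
        in := []byte(`name: "T" unknown_field: [{a: 1}, {a: 2}] oneofs: "x"`)

        var t typepb.Type
        err := prototext.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(in, &t)
        fmt.Println(err, t.GetOneofs()) // <nil> [x]
    }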
func Marshal(m proto.Message) ([]byte, error) { @@ -98,17 +97,23 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -118,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -126,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case there is nothing to output. if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} @@ -150,7 +155,7 @@ type encoder struct { } // marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -190,7 +195,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(name, val.List(), fd) @@ -204,40 +209,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
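Note: prototext gains the same append-style entry point as protojson, backed by the matching byte-slice parameter on text.NewEncoder. A quick sketch; the leading comment bytes are arbitrary:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/prototext"
        "google.golang.org/protobuf/types/known/typepb"
    )

    func main() {
        buf := []byte("# record 1\n") // pre-existing bytes are preserved
        var err error
        buf, err = prototext.MarshalOptions{Multiline: true}.MarshalAppend(buf, &typepb.Type{Name: "T"})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(buf))
    }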
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: s := val.String() if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { return errors.InvalidUTF8(string(fd.FullName())) } e.WriteString(s) - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: e.WriteUint(val.Uint()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(string(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: num := val.Enum() if desc := fd.Enum().Values().ByNumber(num); desc != nil { e.WriteLiteral(string(desc.Name())) @@ -246,7 +251,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error e.WriteInt(int64(num)) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return e.marshalMessage(val.Message(), true) default: @@ -256,7 +261,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { size := list.Len() for i := 0; i < size; i++ { e.WriteName(name) @@ -268,9 +273,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto } // marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -334,7 +339,7 @@ func (e encoder) marshalUnknown(b []byte) { // marshalAny marshals the given google.protobuf.Any message in expanded form. // It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { +func (e encoder) marshalAny(any protoreflect.Message) bool { // Construct the embedded message. 
fds := any.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) diff --git a/event-exporter/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/event-exporter/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..e942bc983 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/event-exporter/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -3,10 +3,10 @@ // license that can be found in the LICENSE file. // Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. +// See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -21,19 +21,16 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. @@ -55,6 +52,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -89,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -106,12 +104,16 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. 
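The negative-length error convention called out in the doc comments above makes it straightforward to walk a payload record by record. A minimal sketch against the public API; raw is an assumed []byte of valid wire data:

    package example

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func walkFields(raw []byte) error {
        for len(raw) > 0 {
            num, typ, n := protowire.ConsumeField(raw)
            if n < 0 {
                // A negative length encodes the error kind.
                return protowire.ParseError(n)
            }
            fmt.Printf("field %d, wire type %d, %d bytes\n", num, typ, n)
            raw = raw[n:]
        }
        return nil
    }

Note that group recursion inside ConsumeFieldValue is now bounded by the new DefaultRecursionLimit, so a pathologically nested group fails with a negative length (errCodeRecursionDepth) instead of exhausting the stack.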
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +128,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +145,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } @@ -159,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -258,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -379,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -407,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -427,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -451,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -466,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
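EncodeZigZag and DecodeZigZag, whose input/output tables are reformatted just below, are exact inverses. A one-line round-trip check against the public package:

    package example

    import "google.golang.org/protobuf/encoding/protowire"

    // EncodeZigZag(-3) == 5 and DecodeZigZag(5) == -3, matching the
    // tables in the doc comments.
    func zigZagRoundTrip(v int64) bool {
        return protowire.DecodeZigZag(protowire.EncodeZigZag(v)) == v
    }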
func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -490,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -501,12 +506,13 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} func DecodeZigZag(x uint64) int64 { @@ -514,6 +520,7 @@ func DecodeZigZag(x uint64) int64 { } // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} func EncodeZigZag(x int64) uint64 { @@ -521,6 +528,7 @@ func EncodeZigZag(x int64) uint64 { } // DecodeBool decodes a uint64 as a bool. +// // Input: { 0, 1, 2, …} // Output: {false, true, true, …} func DecodeBool(x uint64) bool { @@ -528,6 +536,7 @@ func DecodeBool(x uint64) bool { } // EncodeBool encodes a bool as a uint64. +// // Input: {false, true} // Output: { 0, 1} func EncodeBool(x bool) uint64 { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/event-exporter/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 360c63329..a45625c8d 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type list interface { @@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { if isRoot { var name string switch vs.(type) { - case pref.Names: + case protoreflect.Names: name = "Names" - case pref.FieldNumbers: + case protoreflect.FieldNumbers: name = "FieldNumbers" - case pref.FieldRanges: + case protoreflect.FieldRanges: name = "FieldRanges" - case pref.EnumRanges: + case protoreflect.EnumRanges: name = "EnumRanges" - case pref.FileImports: + case protoreflect.FileImports: name = "FileImports" - case pref.Descriptor: + case protoreflect.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" default: name = reflect.ValueOf(vs).Elem().Type().Name() @@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var ss []string switch vs := vs.(type) { - case pref.Names: + case protoreflect.Names: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldNumbers: + case protoreflect.FieldNumbers: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } 
return start + joinStrings(ss, false) + end - case pref.FieldRanges: + case protoreflect.FieldRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0]+1 == r[1] { @@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.EnumRanges: + case protoreflect.EnumRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0] == r[1] { @@ -80,46 +80,44 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.FileImports: + case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
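The descriptorAccessors map deleted below named each accessor as a string and re-resolved it with MethodByName on every format call; the methodAndName pairs that replace it resolve each method once and carry the name only for display. A generic sketch of the same pattern, with hypothetical helper names:

    package example

    import (
        "fmt"
        "reflect"
    )

    type methodAndName struct {
        method reflect.Value
        name   string
    }

    // bind resolves each nullary accessor once; callers then invoke
    // m.method.Call without repeating the string lookup.
    func bind(v reflect.Value, names ...string) []methodAndName {
        ms := make([]methodAndName, 0, len(names))
        for _, n := range names {
            ms = append(ms, methodAndName{v.MethodByName(n), n})
        }
        return ms
    }

    func printAll(ms []methodAndName) {
        for _, m := range ms {
            if m.method.IsValid() {
                fmt.Println(m.name, "=>", m.method.Call(nil)[0])
            }
        }
    }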
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -128,27 +126,61 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { start = rt.Name() + "{" } - _, isFile := t.(pref.FileDescriptor) - rs := records{allowMulti: allowMulti} + _, isFile := t.(protoreflect.FileDescriptor) + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { - case pref.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + case protoreflect.FieldDescriptor: + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -156,21 +188,21 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { case "MapValue": if v := t.MapValue(); v != nil { switch v.Kind() { - case pref.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + case protoreflect.EnumKind: + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) + case protoreflect.MessageKind, protoreflect.GroupKind: + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -180,20 +212,68 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, s) } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: var ss []string fs := t.Fields() for i := 0; i < fs.Len(); i++ { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,21 +282,36 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. 
+ record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } - if _, ok := rv.Interface().(pref.Value); ok { + if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] if !rv.IsNil() { rv = rv.Elem() @@ -250,9 +345,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { switch v := v.(type) { case list: s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: s = string(v.FullName()) case string: s = strconv.Quote(v) @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go index fdd9b13f2..328dc733b 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -15,8 +15,8 @@ import ( "strconv" ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" ) // Format is the serialization format used to represent the default value. @@ -35,56 +35,56 @@ const ( // Unmarshal deserializes the default string s according to the given kind k. // When k is an enum, a list of enum value descriptors must be provided. 
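Unmarshal below accepts two spellings for each kind depending on the Format: GoTag takes the compact forms used in legacy struct tags, while Descriptor takes the forms used in descriptor.proto's default_value. Since defval is an internal package, this is an illustrative mirror of just the bool branch rather than an import of it:

    package example

    import "fmt"

    // parseBoolDefault mirrors the protoreflect.BoolKind case below:
    // GoTag format spells booleans "1"/"0", Descriptor format spells
    // them "true"/"false".
    func parseBoolDefault(s string, goTag bool) (bool, error) {
        if goTag {
            switch s {
            case "1":
                return true, nil
            case "0":
                return false, nil
            }
        } else {
            switch s {
            case "true":
                return true, nil
            case "false":
                return false, nil
            }
        }
        return false, fmt.Errorf("could not parse bool default %q", s)
    }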
-func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { switch s { case "1": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "0": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } else { switch s { case "true": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "false": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { // Go tags use the numeric form of the enum value. if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } } else { // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) + ev := evs.ByName(protoreflect.Name(s)) if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil + return protoreflect.ValueOfInt32(int32(v)), nil, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil + return protoreflect.ValueOfInt64(int64(v)), nil, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil + return protoreflect.ValueOfUint32(uint32(v)), nil, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil + return protoreflect.ValueOfUint64(uint64(v)), nil, nil } - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: var v float64 var err error switch s { @@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( v, err = strconv.ParseFloat(s, 64) } if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil } else { - return pref.ValueOfFloat64(float64(v)), nil, nil + return protoreflect.ValueOfFloat64(float64(v)), nil, nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are already unescaped and can be used as is. 
- return pref.ValueOfString(s), nil, nil - case pref.BytesKind: + return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil + return protoreflect.ValueOfBytes(b), nil, nil } } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) } // Marshal serializes v as the default string according to the given kind k. // When specifying the Descriptor format for an enum kind, the associated // enum value descriptor must be provided. -func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { if v.Bool() { return "1", nil @@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( return "false", nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { return strconv.FormatInt(int64(v.Enum()), 10), nil } else { return string(ev.Name()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: f := v.Float() switch { case math.IsInf(f, -1): @@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( case math.IsNaN(f): return "nan", nil default: - if k == pref.FloatKind { + if k == protoreflect.FloatKind { return strconv.FormatFloat(f, 'g', -1, 32), nil } else { return strconv.FormatFloat(f, 'g', -1, 64), nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are serialized as is without any escaping. return v.String(), nil - case pref.BytesKind: + case protoreflect.BytesKind: if s, ok := marshalBytes(v.Bytes()); ok { return s, nil } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index b13fd29e8..d043a6ebe 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool { } // consumeToken constructs a Token for given Kind with raw value derived from -// current d.in and given size, and consumes the given size-lenght of it. +// current d.in and given size, and consumes the given size-length of it. 
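The json encoder change just below (and the matching text encoder change further on) has NewEncoder accept a caller-supplied buffer and switches integer writes to strconv's Append variants; together these are what let the public MarshalAppend functions avoid an extra copy. The pattern in miniature, with hypothetical names:

    package example

    import "strconv"

    type encoder struct{ out []byte }

    // newEncoder appends into the caller-owned buffer rather than
    // allocating its own.
    func newEncoder(buf []byte) *encoder { return &encoder{out: buf} }

    func (e *encoder) WriteInt(n int64) {
        // strconv.AppendInt writes digits directly into e.out, avoiding
        // the intermediate string that FormatInt would allocate.
        e.out = strconv.AppendInt(e.out, n, 10)
    }

    func (e *encoder) Bytes() []byte { return e.out }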
func (d *Decoder) consumeToken(kind Kind, size int) Token { tok := Token{ kind: kind, diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go index fbdf34873..934f2dcb3 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -41,8 +41,10 @@ type Encoder struct { // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. -func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index c1866f3c1..a6693f0a2 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // The MessageSet wire format is equivalent to a message defined as follows, @@ -33,6 +33,7 @@ const ( // ExtensionName is the field name for extensions of MessageSet. // // A valid MessageSet extension must be of the form: +// // message MyMessage { // extend proto2.bridge.MessageSet { // optional MyMessage message_set_extension = 1234; @@ -42,13 +43,13 @@ const ( const ExtensionName = "message_set_extension" // IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { +func IsMessageSet(md protoreflect.MessageDescriptor) bool { xmd, ok := md.(interface{ IsMessageSet() bool }) return ok && xmd.IsMessageSet() } // IsMessageSetExtension reports this field properly extends a MessageSet. 
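IsMessageSet above is an optional-interface probe: a concrete descriptor may implement an IsMessageSet method even though protoreflect.MessageDescriptor does not declare one. The same probe as a standalone sketch:

    package example

    import "google.golang.org/protobuf/reflect/protoreflect"

    // usesMessageSet reports whether the concrete descriptor opts in
    // to the MessageSet wire format via the optional method.
    func usesMessageSet(md protoreflect.MessageDescriptor) bool {
        x, ok := md.(interface{ IsMessageSet() bool })
        return ok && x.IsMessageSet()
    }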
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool { +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { switch { case fd.Name() != ExtensionName: return false diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 38f1931c6..373d20837 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var byteType = reflect.TypeOf(byte(0)) @@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) // This does not populate the Enum or Message (except for weak message). // // This function is a best effort attempt; parsing errors are ignored. -func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) + f.L0.FullName = protoreflect.FullName(s[len("name="):]) case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) + f.L1.Number = protoreflect.FieldNumber(n) case s == "opt": - f.L1.Cardinality = pref.Optional + f.L1.Cardinality = protoreflect.Optional case s == "req": - f.L1.Cardinality = pref.Required + f.L1.Cardinality = protoreflect.Required case s == "rep": - f.L1.Cardinality = pref.Repeated + f.L1.Cardinality = protoreflect.Repeated case s == "varint": switch goType.Kind() { case reflect.Bool: - f.L1.Kind = pref.BoolKind + f.L1.Kind = protoreflect.BoolKind case reflect.Int32: - f.L1.Kind = pref.Int32Kind + f.L1.Kind = protoreflect.Int32Kind case reflect.Int64: - f.L1.Kind = pref.Int64Kind + f.L1.Kind = protoreflect.Int64Kind case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind + f.L1.Kind = protoreflect.Uint32Kind case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind + f.L1.Kind = protoreflect.Uint64Kind } case s == "zigzag32": if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind + f.L1.Kind = protoreflect.Sint32Kind } case s == "zigzag64": if goType.Kind() == reflect.Int64 { - f.L1.Kind = pref.Sint64Kind + f.L1.Kind = protoreflect.Sint64Kind } case s == "fixed32": switch goType.Kind() { case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind + f.L1.Kind = protoreflect.Sfixed32Kind case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind + f.L1.Kind = protoreflect.Fixed32Kind case reflect.Float32: - f.L1.Kind = pref.FloatKind + f.L1.Kind = protoreflect.FloatKind } case s == "fixed64": switch goType.Kind() { case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind + f.L1.Kind = protoreflect.Sfixed64Kind case 
reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind + f.L1.Kind = protoreflect.Fixed64Kind case reflect.Float64: - f.L1.Kind = pref.DoubleKind + f.L1.Kind = protoreflect.DoubleKind } case s == "bytes": switch { case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind + f.L1.Kind = protoreflect.StringKind case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind + f.L1.Kind = protoreflect.BytesKind default: - f.L1.Kind = pref.MessageKind + f.L1.Kind = protoreflect.MessageKind } case s == "group": - f.L1.Kind = pref.GroupKind + f.L1.Kind = protoreflect.GroupKind case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind + f.L1.Kind = protoreflect.EnumKind case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { @@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. s, i = tag[len("def="):], len(tag) v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) + f.L1.Default = filedesc.DefaultValue(v, ev) case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 + f.L0.ParentFile = filedesc.SurrogateProto3 } tag = strings.TrimPrefix(tag[i:], ",") } // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) } return f } @@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p // Depending on the context on how Marshal is called, there are different ways // through which that information is determined. As such it is the caller's // responsibility to provide a function to obtain that information. 
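Marshal below reassembles the legacy struct-tag string from a FieldDescriptor: wire kind first, then field number, cardinality, packedness, name, and trailing markers such as proto3 and enum=. For orientation, the tag for a hypothetical packed repeated sfixed32 field numbered 7 in a proto3 file would come out roughly as:

    package example

    // Field name and number are invented; the tag layout follows the
    // append order in Marshal (sfixed32 maps to the "fixed32" kind).
    type sample struct {
        SampleValues []int32 `protobuf:"fixed32,7,rep,packed,name=sample_values,proto3"`
    }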
-func Marshal(fd pref.FieldDescriptor, enumName string) string { +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { var tag []string switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: tag = append(tag, "varint") - case pref.Sint32Kind: + case protoreflect.Sint32Kind: tag = append(tag, "zigzag32") - case pref.Sint64Kind: + case protoreflect.Sint64Kind: tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: tag = append(tag, "bytes") - case pref.GroupKind: + case protoreflect.GroupKind: tag = append(tag, "group") } tag = append(tag, strconv.Itoa(int(fd.Number()))) switch fd.Cardinality() { - case pref.Optional: + case protoreflect.Optional: tag = append(tag, "opt") - case pref.Required: + case protoreflect.Required: tag = append(tag, "req") - case pref.Repeated: + case protoreflect.Repeated: tag = append(tag, "rep") } if fd.IsPacked() { tag = append(tag, "packed") } name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { // The name of the FieldDescriptor for a group field is // lowercased. To find the original capitalization, we // look in the field's MessageType. @@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { tag = append(tag, "proto3") } - if fd.Kind() == pref.EnumKind && enumName != "" { + if fd.Kind() == protoreflect.EnumKind && enumName != "" { tag = append(tag, "enum="+enumName) } if fd.ContainingOneof() != nil { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..87853e786 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "unicode/utf8" @@ -381,7 +380,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { @@ -413,15 +412,16 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { // Field number. Identify if input is a valid number that is not negative // and is decimal integer within 32-bit range. 
if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil } } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + return Token{}, d.newSyntaxError("invalid field number: %s", str) } - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } // parseTypeName parses Any type URL or extension field name. The name is @@ -571,7 +571,7 @@ func (d *Decoder) parseScalar() (Token, error) { return tok, nil } - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) } // parseLiteralValue parses a literal value. A literal value is used for @@ -653,8 +653,29 @@ func consume(b []byte, n int) []byte { return b } -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) + } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} // isDelim returns true if given byte is a delimiter character. func isDelim(c byte) bool { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index f2d90b789..45c81f029 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { if num.neg { numAttrs |= isNegative } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } tok := Token{ kind: Scalar, attrs: numberValue, pos: len(d.orig) - len(d.in), raw: d.in[:num.size], - str: string(d.in[:strSize]), + str: num.string(d.in), numAttrs: numAttrs, } d.consume(num.size) @@ -46,12 +41,35 @@ type number struct { kind uint8 neg bool size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) + } // parseNumber constructs a number object from given input. 
It allows for the // following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// // It also returns the number of parsed bytes for the given number, 0 if it is // not a number. func parseNumber(input []byte) number { @@ -65,19 +83,22 @@ func parseNumber(input []byte) number { } // Optional - + var sep int if s[0] == '-' { neg = true s = s[1:] size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep if len(s) == 0 { return number{} } } - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. - switch { case s[0] == '0': if len(s) > 1 { @@ -114,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -186,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go index 0ce8d6fb8..7ae6c2a3c 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -24,6 +24,6 @@ // the Go implementation should as well. // // The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters package text diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index da289ccce..cf7aed77b 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -53,8 +53,10 @@ type encoderState struct { // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) 
+ e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package errors diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/build.go index b293b6947..7cac1c190 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -12,8 +12,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder construct a protoreflect.FileDescriptor from the raw descriptor. @@ -38,7 +37,7 @@ type Builder struct { // TypeResolver resolves extension field types for descriptor options. // If nil, it uses protoregistry.GlobalTypes. TypeResolver interface { - preg.ExtensionTypeResolver + protoregistry.ExtensionTypeResolver } // FileRegistry is use to lookup file, enum, and message dependencies. @@ -46,8 +45,8 @@ type Builder struct { // If nil, it uses protoregistry.GlobalFiles. FileRegistry interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } } @@ -55,8 +54,8 @@ type Builder struct { // If so, it permits looking up an enum or message dependency based on the // sub-list and element index into filetype.Builder.DependencyIndexes. type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor } // Indexes of each sub-list in filetype.Builder.DependencyIndexes. 
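The is_go112.go and is_go113.go hunks above add the //go:build form alongside the legacy // +build comment; toolchains before Go 1.17 only understand the old form, so the two lines must state the same constraint. For the go1.13+ variant the file header reads:

    //go:build go1.13
    // +build go1.13

    package errors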
@@ -70,7 +69,7 @@ const ( // Out is the output of the Builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor // Enums is all enum descriptors in "flattened ordering". Enums []Enum @@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { // Initialize resolvers and registries if unpopulated. if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes + db.TypeResolver = protoregistry.GlobalTypes } if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles + db.FileRegistry = protoregistry.GlobalFiles } fd := newRawFile(db) diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 98ab142ae..193c68e8f 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -17,15 +17,30 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
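The Edition constants introduced above deliberately align with the numeric values of the Edition enum in descriptor.proto, so converting between the two is a plain cast. A hedged sketch of turning one back into a human-readable label, written as if inside the filedesc package; the helper name is made up:

    package filedesc

    // syntaxLabel is hypothetical; the constants are the ones added in
    // this hunk (EditionProto2 = 998, EditionProto3 = 999, Edition2023 = 1000).
    func syntaxLabel(e Edition) string {
        switch e {
        case EditionProto2:
            return "proto2"
        case EditionProto3:
            return "proto3"
        case Edition2023:
            return "editions"
        default:
            return "unknown"
        }
    }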
@@ -43,46 +58,70 @@ type ( L2 *FileL2 } FileL1 struct { - Syntax pref.Syntax + Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string - Package pref.FullName + Package protoreflect.FullName Enums Enums Messages Messages Extensions Extensions Services Services + + EditionFeatures FileEditionFeatures } FileL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + FileEditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + } ) -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() } return descopts.File } -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return 
&fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { @@ -119,7 +158,7 @@ type ( eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Values EnumValues ReservedNames Names ReservedRanges EnumRanges @@ -130,41 +169,41 @@ type ( L1 EnumValueL1 } EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber } ) -func (ed *Enum) Options() pref.ProtoMessage { +func (ed *Enum) Options() protoreflect.ProtoMessage { if f := ed.lazyInit().Options; f != nil { return f() } return descopts.Enum } -func (ed *Enum) Values() pref.EnumValueDescriptors { +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { if ed.L1.eagerValues { return &ed.L2.Values } return &ed.lazyInit().Values } -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *EnumValue) Options() pref.ProtoMessage { +func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { return f() } return descopts.EnumValue } -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} type ( Message struct { @@ -180,14 +219,14 @@ type ( IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields Fields Oneofs Oneofs ReservedNames Names ReservedRanges FieldRanges RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges } Field struct { @@ -195,10 +234,10 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be 
consistent with Message.RequiredNumbers - Kind pref.Kind + Options func() protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions @@ -207,9 +246,12 @@ type ( HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor + + // Edition features. + Presence bool } Oneof struct { @@ -217,35 +259,35 @@ type ( L1 OneofL1 } OneofL1 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof } ) -func (md *Message) Options() pref.ProtoMessage { +func (md *Message) Options() protoreflect.ProtoMessage { if f := md.lazyInit().Options; f != nil { return f() } return descopts.Message } -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { return f() } return descopts.ExtensionRange } -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md 
*Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 @@ -260,28 +302,31 @@ func (md *Message) IsMessageSet() bool { return md.L1.IsMessageSet } -func (fd *Field) Options() pref.ProtoMessage { +func (fd *Field) Options() protoreflect.ProtoMessage { if f := fd.L1.Options; f != nil { return f() } return descopts.Field } -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + } + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: default: return true } @@ -290,40 +335,40 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { +func (fd *Field) MapKey() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } -func (fd *Field) MapValue() pref.FieldDescriptor { +func (fd *Field) MapValue() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return 
fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } -func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) } -func (fd *Field) Enum() pref.EnumDescriptor { +func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } -func (fd *Field) Message() pref.MessageDescriptor { +func (fd *Field) Message() protoreflect.MessageDescriptor { if fd.L1.IsWeak { if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } } return fd.L1.Message } -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 // validation for the string field. 
This exists for Google-internal use only @@ -336,21 +381,21 @@ func (fd *Field) EnforceUTF8() bool { if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() } -func (od *Oneof) Options() pref.ProtoMessage { +func (od *Oneof) Options() protoreflect.ProtoMessage { if f := od.L1.Options; f != nil { return f() } return descopts.Oneof } -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} type ( Extension struct { @@ -359,55 +404,57 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } ) -func (xd *Extension) Options() pref.ProtoMessage { +func (xd *Extension) Options() protoreflect.ProtoMessage { if f := xd.lazyInit().Options; f != nil { return f() } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return 
xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} func (xd *Extension) lazyInit() *ExtensionL2 { xd.L0.ParentFile.lazyInit() // implicitly initializes L2 return xd.L2 @@ -421,7 +468,7 @@ type ( } ServiceL1 struct{} ServiceL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Methods Methods } @@ -430,48 +477,48 @@ type ( L1 MethodL1 } MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor IsStreamingClient bool IsStreamingServer bool } ) -func (sd *Service) Options() pref.ProtoMessage { +func (sd *Service) Options() protoreflect.ProtoMessage { if f := sd.lazyInit().Options; f != nil { return f() } return descopts.Service } -func (sd *Service) Methods() pref.MethodDescriptors { return 
&sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} func (sd *Service) lazyInit() *ServiceL2 { sd.L0.ParentFile.lazyInit() // implicitly initializes L2 return sd.L2 } -func (md *Method) Options() pref.ProtoMessage { +func (md *Method) Options() protoreflect.ProtoMessage { if f := md.L1.Options; f != nil { return f() } return descopts.Method } -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files can be used to create standalone descriptors // where the syntax is the only information derived from the parent file.
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -479,24 +526,24 @@ type ( L0 BaseL0 } BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor Index int } ) -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { return nil // surrogate files are not real parents } return d.L0.ParentFile } -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} @@ -513,7 +560,7 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { // For extensions, JSON and text are formatted the same way. @@ -533,7 +580,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } @@ -541,10 +588,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { return s } -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { // Store a copy of the default bytes, so that we can detect @@ -554,9 +601,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { return dv } -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { // If the enum is declared within the same file, be careful not to // blindly call the Values method, lest we bind ourselves in a deadlock. if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { @@ -567,9 +614,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d // If we are unable to resolve the enum dependency, use a placeholder // enum value since we will not be able to parse the default value. - if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) return DefaultValue(v, ev) } } @@ -583,41 +630,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d type defaultValue struct { has bool - val pref.Value - enum pref.EnumValueDescriptor + val protoreflect.Value + enum protoreflect.EnumValueDescriptor bytes []byte } -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { // Return the zero value as the default if unpopulated. 
if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} } switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) + return protoreflect.ValueOfEnum(evs.Get(0).Number()) } - return pref.ValueOfEnum(0) + return protoreflect.ValueOfEnum(0) } } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 66e1fee52..4a1584c9d 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // fileRaw is a data struct used when initializing a file descriptor from @@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { sb := getBuilder() defer putBuilder(sb) - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int b0 := b @@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 case "proto3": - fd.L1.Syntax = pref.Proto3 + fd.L1.Syntax = protoreflect.Proto3 default: panic("invalid syntax") } case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: if prevField != 
genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -163,7 +163,7 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 } // Must allocate all declarations before parsing each descriptor type @@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i @@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc } } -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int var posEnums, posMessages, posExtensions int b0 := b @@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { } } -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i @@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) + xd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) + xd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) + xd.L1.Kind = protoreflect.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref } } -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { sd.L0.ParentFile = pf sd.L0.Parent = pd sd.L0.Index = i @@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { // makeFullName converts b to a protoreflect.FullName, // where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { if len(b) == 0 || b[0] != '.' 
{ panic("name reference must be fully qualified") } - return pref.FullName(sb.MakeString(b[1:])) + return protoreflect.FullName(sb.MakeString(b[1:])) } -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 198451e3e..736a19a75 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -13,7 +13,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) func (fd *File) lazyRawInit() { @@ -39,10 +39,10 @@ func (file *File) resolveMessages() { // Resolve message field dependency. switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ } @@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { // Resolve extension field dependency. switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) depIdx++ } @@ -92,7 +92,7 @@ func (file *File) resolveServices() { } } -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { @@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref } } if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) + return d.(protoreflect.EnumDescriptor) } return ed } -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { @@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 } } if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } return md } @@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { if imp == nil { imp = PlaceholderFile(path) } - fd.L2.Imports = append(fd.L2.Imports, 
pref.FileImport{FileDescriptor: imp}) + fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) case genid.EnumDescriptorProto_Options_field_number: @@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) } -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { b = b[m:] switch num { case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) + r[0] = protoreflect.EnumNumber(v) case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) + r[1] = protoreflect.EnumNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { return r } -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { vd.L0.ParentFile = pf vd.L0.Parent = pd vd.L0.Index = i @@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) + vd.L1.Number = protoreflect.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) case genid.DescriptorProto_ExtensionRange_field_number: @@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { for i, b := range rawFields { fd := &md.L2.Fields.List[i] fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { + if fd.L1.Cardinality == protoreflect.Required { md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) } } @@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) { } } -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { +func unmarshalMessageReservedRange(b []byte) (r 
[2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { b = b[m:] switch num { case genid.DescriptorProto_ReservedRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { return r } -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions b = b[m:] switch num { case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions return r, rawOptions } -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i @@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) + fd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) + fd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) + fd.L1.Kind = protoreflect.Kind(v) case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either @@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = PlaceholderMessage(name) } } @@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { } } -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, 
pf *File, pd pref.Descriptor, i int) { +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { od.L0.ParentFile = pf od.L0.Parent = pd od.L0.Index = i @@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_JsonName_field_number: xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = PlaceholderMessage(name) } } @@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) } -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i @@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { // // The type of message to unmarshal to is passed as a pointer since the // vars in descopts may not yet be populated at the time this function is called. 
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { if b == nil { return nil } - var opts pref.ProtoMessage + var opts protoreflect.ProtoMessage var once sync.Once - return func() pref.ProtoMessage { + return func() protoreflect.ProtoMessage { once.Do(func() { if *p == nil { panic("Descriptor.Options called without importing the descriptor package") } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) if err := (proto.UnmarshalOptions{ AllowPartial: true, Resolver: db.TypeResolver, diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index aa294fff9..e3b6587da 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -17,31 +17,30 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) -type FileImports []pref.FileImport +type FileImports []protoreflect.FileImport func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} type Names struct { - List []pref.Name + List []protoreflect.Name once sync.Once - has map[pref.Name]int // protected by once + has map[protoreflect.Name]int // protected by once } func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *Names) ProtoInternal(pragma.DoNotImplement) {} func (p *Names) lazyInit() *Names { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) + p.has = make(map[protoreflect.Name]int, len(p.List)) for _, s := range p.List { p.has[s] = p.has[s] + 1 } @@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { } type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive once sync.Once - sorted [][2]pref.EnumNumber // protected by once + sorted [][2]protoreflect.EnumNumber // protected by once } -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := enumRange(ls[i]); { @@ -129,14 +128,14 @@ func (r enumRange) String() string { } type 
FieldRanges struct { - List [][2]pref.FieldNumber // start inclusive; end exclusive + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive once sync.Once - sorted [][2]pref.FieldNumber // protected by once + sorted [][2]protoreflect.FieldNumber // protected by once } -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := fieldRange(ls[i]); { @@ -221,17 +220,17 @@ func (r fieldRange) String() string { } type FieldNumbers struct { - List []pref.FieldNumber + List []protoreflect.FieldNumber once sync.Once - has map[pref.FieldNumber]struct{} // protected by once + has map[protoreflect.FieldNumber]struct{} // protected by once } -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) for _, n := range p.List { p.has[n] = struct{}{} } @@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} type OneofFields struct { - List []pref.FieldDescriptor + List []protoreflect.FieldDescriptor once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once } -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p 
*OneofFields) ByTextName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} func (p *OneofFields) lazyInit() *OneofFields { p.once.Do(func() { if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f @@ -284,123 +291,123 @@ type SourceLocations struct { // List is a list of SourceLocations. // The SourceLocation.Next field does not need to be populated // as it will be lazily populated upon first need. - List []pref.SourceLocation + List []protoreflect.SourceLocation // File is the parent file descriptor that these locations are relative to. // If non-nil, ByDescriptor verifies that the provided descriptor // is a child of this file descriptor. - File pref.FileDescriptor + File protoreflect.FileDescriptor once sync.Once byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { if i, ok := p.lazyInit().byPath[k]; ok { return p.List[i] } - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { return p.byKey(newPathKey(path)) } -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files + return protoreflect.SourceLocation{} // mismatching parent files } var pathArr [16]int32 path := pathArr[:0] for { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: // Reverse the path since it was constructed in reverse. 
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { path[i], path[j] = path[j], path[i] } return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() path = append(path, int32(desc.Index())) desc = desc.Parent() if isExtension { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Extension_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } else { switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Field_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumValueDescriptor: + case protoreflect.EnumValueDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.MethodDescriptor: + case protoreflect.MethodDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) default: - return pref.SourceLocation{} + return 
protoreflect.SourceLocation{} } default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } } @@ -435,7 +442,7 @@ type pathKey struct { str string // used if the path does not fit in arr } -func newPathKey(p pref.SourcePath) (k pathKey) { +func newPathKey(p protoreflect.SourcePath) (k pathKey) { if len(p) < len(k.arr) { for i, ps := range p { if ps < 0 || math.MaxUint8 <= ps { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index dbf2c605b..28240ebc5 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var ( @@ -30,78 +30,80 @@ var ( // PlaceholderFile is a placeholder, representing only the file path. type PlaceholderFile string -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() 
protoreflect.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName +type PlaceholderEnum protoreflect.FullName -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnumValue is a placeholder, representing only the full name. 
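The Placeholder* types in this file all follow one pattern: a named string type whose methods satisfy a descriptor interface with fixed zero values, so an unresolved reference stays cheap to represent. A minimal, self-contained sketch of that pattern (the Descriptor interface and names here are toy stand-ins; internal/filedesc itself cannot be imported from outside the module):

package main

import (
	"fmt"
	"strings"
)

// Descriptor is a toy stand-in for protoreflect.Descriptor.
type Descriptor interface {
	FullName() string
	Name() string
	IsPlaceholder() bool
}

// PlaceholderMessage carries only its full name; every other method
// returns a fixed zero value, mirroring the vendored placeholders above.
type PlaceholderMessage string

func (m PlaceholderMessage) FullName() string { return string(m) }
func (m PlaceholderMessage) Name() string {
	s := string(m)
	if i := strings.LastIndexByte(s, '.'); i >= 0 {
		return s[i+1:]
	}
	return s
}
func (m PlaceholderMessage) IsPlaceholder() bool { return true }

func main() {
	var d Descriptor = PlaceholderMessage("google.protobuf.Empty")
	fmt.Println(d.Name(), d.IsPlaceholder()) // Empty true
}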
-type PlaceholderEnumValue pref.FullName +type PlaceholderEnumValue protoreflect.FullName -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderMessage is a placeholder, representing only the full name. 
-type PlaceholderMessage pref.FullName +type PlaceholderMessage protoreflect.FullName -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m 
PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/filetype/build.go b/event-exporter/vendor/google.golang.org/protobuf/internal/filetype/build.go index 0a0dd35de..f0e38c4ef 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -10,17 +10,16 @@ import ( "reflect" "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filedesc" pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs type descriptors from a raw file descriptor // and associated Go types for each enum and message declaration. // -// -// Flattened Ordering +// # Flattened Ordering // // The protobuf type system represents declarations as a tree. Certain nodes in // the tree require us to either associate it with a concrete Go type or to @@ -52,7 +51,7 @@ import ( // that children themselves may have. type Builder struct { // File is the underlying file descriptor builder. - File fdesc.Builder + File filedesc.Builder // GoTypes is a unique set of the Go types for all declarations and // dependencies. Each type is represented as a zero value of the Go type. @@ -108,22 +107,22 @@ type Builder struct { // TypeRegistry is the registry to register each type descriptor. // If nil, it uses protoregistry.GlobalTypes. TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error } } // Out is the output of the builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor } func (tb Builder) Build() (out Out) { // Replace the resolver with one that resolves dependencies by index, // which is faster and more reliable than relying on the global registry. if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles + tb.File.FileRegistry = protoregistry.GlobalFiles } tb.File.FileRegistry = &resolverByIndex{ goTypes: tb.GoTypes, @@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { // Initialize registry if unpopulated. 
if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes + tb.TypeRegistry = protoregistry.GlobalTypes } fbOut := tb.File.Build() @@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { for i := range fbOut.Messages { switch fbOut.Messages[i].Name() { case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) } } } @@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { const listExtDeps = 2 var goType reflect.Type switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ @@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { return out } -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: 
reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), } type depIdxs []int32 @@ -274,13 +273,13 @@ type ( fileRegistry } fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } ) -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { return &es[depIdx] } else { @@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes } } -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { return &ms[depIdx-len(es)] } else { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/event-exporter/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c20..8f94230ea 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,12 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + // Names for google.protobuf.FileDescriptorSet. 
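The genid constants that follow pin descriptor.proto's names and field numbers as typed values (protoreflect.Name, protoreflect.FieldNumber) so internal packages can refer to them without building full descriptors. Field numbers combine with wire types to form the tags seen on the wire, which is how makeExtensionFieldInfo further down uses them via protowire.EncodeTag. A small runnable illustration (14 is chosen only because the new FileDescriptorProto edition field below sits at number 14):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// A tag is (field_number << 3) | wire_type; EncodeTag computes exactly that.
	tag := protowire.EncodeTag(protowire.Number(14), protowire.VarintType)
	fmt.Println(tag) // 112
}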
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -50,6 +56,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +70,7 @@ const ( FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. @@ -79,6 +87,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. @@ -180,13 +189,58 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -430,6 +484,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -452,6 +507,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -477,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -494,26 +551,32 @@ const ( // Field names for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. @@ -528,16 +591,28 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +622,14 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +645,39 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -572,13 +686,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -590,20 +707,26 @@ const ( // Field names for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -615,15 +738,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. 
const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -635,15 +764,18 @@ const ( // Field names for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -658,10 +790,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -669,6 +803,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -735,6 +870,120 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
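FeatureSet is the new per-edition feature container; the constants that follow name its six fields (field_presence, enum_type, repeated_field_encoding, utf8_validation, message_encoding, json_format). As a rough sketch of how they line up with the real message at runtime, assuming a descriptorpb version recent enough to ship FeatureSet:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Enumerate FeatureSet's fields; the numbers and names printed here
	// should match the genid constants in this file.
	fields := (&descriptorpb.FeatureSet{}).ProtoReflect().Descriptor().Fields()
	for i := 0; i < fields.Len(); i++ {
		f := fields.Get(i)
		fmt.Println(f.Number(), f.Name())
	}
}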
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. 
+const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -813,11 +1062,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. 
@@ -826,4 +1077,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/event-exporter/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 3bc710138..e0f75fea0 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -32,6 +32,7 @@ const ( Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ const ( Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ const ( Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. @@ -121,12 +124,14 @@ const ( Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ const ( Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. 
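Taken together, the genid additions above track the protobuf editions work: FileDescriptorProto, Type, and Enum all gain an edition field, alongside the FeatureSet machinery. A hedged sketch of resolving the new field through the public reflection API (the nil check matters because descriptorpb versions older than this bump predate the field):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// 14 mirrors FileDescriptorProto_Edition_field_number above.
	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByNumber(protoreflect.FieldNumber(14))
	if fd == nil {
		fmt.Println("edition field not present in this descriptorpb version")
		return
	}
	fmt.Println(fd.FullName()) // google.protobuf.FileDescriptorProto.edition
}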
diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/api_export.go index abee5f30e..a371f98de 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Export is a zero-length named type that exists only to export a set of @@ -32,11 +32,11 @@ type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. -func (Export) EnumOf(e enum) pref.Enum { +func (Export) EnumOf(e enum) protoreflect.Enum { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e default: return legacyWrapEnum(reflect.ValueOf(e)) @@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. // It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Descriptor() default: return LegacyLoadEnumDesc(reflect.TypeOf(e)) @@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { // EnumTypeOf returns the protoreflect.EnumType for e. // It returns nil if e is nil. -func (Export) EnumTypeOf(e enum) pref.EnumType { +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Type() default: return legacyLoadEnumType(reflect.TypeOf(e)) @@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { // EnumStringOf returns the enum value as a string, either as the name if // the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { ev := ed.Values().ByNumber(n) if ev != nil { return string(ev.Name()) @@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } @@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. // It returns nil if m is nil. 
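These Export conversion hooks back the public shims in the legacy github.com/golang/protobuf module, whose proto.MessageV1 and proto.MessageV2 delegate to protoimpl.X, an instance of this Export type. A round-trip sketch, assuming the legacy module is also on the module graph:

package main

import (
	"fmt"

	protov1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m2 := &emptypb.Empty{}        // a v2 message (implements protoreflect.ProtoMessage)
	m1 := protov1.MessageV1(m2)   // to the legacy v1 interface
	back := protov1.MessageV2(m1) // and back to v2
	fmt.Println(proto.Equal(m2, back)) // true
}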
-func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { switch mv := m.(type) { case nil: return nil - case piface.MessageV1: + case protoiface.MessageV1: return mv case unwrapper: return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return legacyMessageWrapper{mv} default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) } } -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { switch mv := m.(type) { case nil: return nil - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return mv case legacyMessageWrapper: return mv.m - case piface.MessageV1: + case protoiface.MessageV1: return nil default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) @@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. // It returns nil if m is nil. -func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { if m == nil { return nil } @@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { // MessageOf returns the protoreflect.Message interface over m. // It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { +func (Export) MessageOf(m message) protoreflect.Message { if m == nil { return nil } @@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. // It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { +func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { if m == nil { return nil } @@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { // MessageTypeOf returns the protoreflect.MessageType for m. // It returns nil if m is nil. -func (Export) MessageTypeOf(m message) pref.MessageType { +func (Export) MessageTypeOf(m message) protoreflect.MessageType { if m == nil { return nil } @@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { // MessageStringOf returns the message value as a string, // which is the message serialized in the protobuf text format. 
-func (Export) MessageStringOf(m pref.ProtoMessage) string { +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { return prototext.MarshalOptions{Multiline: false}.Format(m) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index b82341e57..bff041edc 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -8,18 +8,18 @@ import ( "sync" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() } else { p = in.Message.(*messageReflectWrapper).pointer() } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) } func (mi *MessageInfo) checkInitializedPointer(p pointer) error { @@ -90,7 +90,7 @@ var ( // needsInitCheck reports whether a message needs to be checked for partial initialization. // // It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { +func needsInitCheck(md protoreflect.MessageDescriptor) bool { if v, ok := needsInitCheckMap.Load(md); ok { if has, ok := v.(bool); ok { return has @@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { return needsInitCheckLocked(md) } -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { if v, ok := needsInitCheckMap.Load(md); ok { // If has is true, we've previously determined that this message // needs init checks. diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 08d35170b..e74cefdc5 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type extensionFieldInfo struct { @@ -23,7 +23,7 @@ type extensionFieldInfo struct { var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info @@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { } // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. 
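// Aside: needsInitCheck above caches its per-descriptor answer in a sync.Map
// so the (possibly recursive) walk over nested message types runs once per
// descriptor. The memoization shape, simplified into a sketch (the real code
// additionally serializes recursive computation behind a mutex):
package example

import (
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
)

var initCheckCache sync.Map // map[protoreflect.MessageDescriptor]bool

func memoizedNeedsInit(md protoreflect.MessageDescriptor, compute func(protoreflect.MessageDescriptor) bool) bool {
	if v, ok := initCheckCache.Load(md); ok {
		return v.(bool)
	}
	has := compute(md)
	initCheckCache.Store(md, has)
	return has
}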
-func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { return xi.(*extensionFieldInfo) } @@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { return e } -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { var wiretag uint64 if !xd.IsPacked() { wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) @@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { // This is true for composite types, where we pass in a message, list, or map to fill in, // and for enums, where we pass in a prototype value to specify the concrete enum type. switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: e.unmarshalNeedsValue = true default: - if xd.Cardinality() == pref.Repeated { + if xd.Cardinality() == protoreflect.Repeated { e.unmarshalNeedsValue = true } } @@ -73,21 +73,21 @@ type lazyExtensionValue struct { atomicOnce uint32 // atomically set if value is valid mu sync.Mutex xi *extensionFieldInfo - value pref.Value + value protoreflect.Value b []byte - fn func() pref.Value + fn func() protoreflect.Value } type ExtensionField struct { - typ pref.ExtensionType + typ protoreflect.ExtensionType // value is either the value of GetValue, // or a *lazyExtensionValue that then returns the value of GetValue. - value pref.Value + value protoreflect.Value lazy *lazyExtensionValue } -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { if f.lazy == nil { f.lazy = &lazyExtensionValue{xi: xi} } @@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie f.lazy.b = append(f.lazy.b, b...) } -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { if f.typ == nil { return true } @@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { // Set sets the type and value of the extension field. // This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { f.typ = t f.value = v f.lazy = nil @@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { // SetLazy sets the type and a value that is to be lazily evaluated upon first use. // This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { +func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { f.typ = t f.lazy = &lazyExtensionValue{fn: fn} } // Value returns the value of the extension field. // This may be called concurrently. 
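// Aside: appendLazyBytes above postpones decoding an extension by buffering
// its raw wire bytes (tag plus payload) until first access. A sketch of that
// buffering step using protowire directly (function name hypothetical):
package example

import "google.golang.org/protobuf/encoding/protowire"

// bufferField re-encodes the field tag and appends the undecoded payload so
// the whole field can be unmarshaled later in a single pass.
func bufferField(dst []byte, num protowire.Number, wtyp protowire.Type, payload []byte) []byte {
	dst = protowire.AppendTag(dst, num, wtyp)
	return append(dst, payload...)
}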
-func (f *ExtensionField) Value() pref.Value { +func (f *ExtensionField) Value() protoreflect.Value { if f.lazy != nil { if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { f.lazyInit() @@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { // Type returns the type of the extension field. // This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { +func (f ExtensionField) Type() protoreflect.ExtensionType { return f.typ } @@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { // IsLazy reports whether a field is lazily encoded. // It is exported for testing. -func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { var mi *MessageInfo var p pointer switch m := m.(type) { @@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { default: return false } - xd, ok := fd.(pref.ExtensionTypeDescriptor) + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { return false } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index cb4b482d1..3fadd241e 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -12,9 +12,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) type errInvalidUTF8 struct{} @@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } // to the appropriate field-specific function as necessary. // // The unmarshal function is set on each field individually as usual. 
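// Aside: ExtensionField.Value above is a double-checked lazy read: an atomic
// flag lets the fast path skip the mutex once the value has been
// materialized. The pattern in isolation, as a sketch:
package example

import (
	"sync"
	"sync/atomic"
)

type lazyValue struct {
	done uint32 // set atomically once v is valid
	mu   sync.Mutex
	fn   func() string // deferred computation, run at most once
	v    string
}

func (l *lazyValue) get() string {
	if atomic.LoadUint32(&l.done) == 0 {
		l.mu.Lock()
		if l.done == 0 {
			l.v = l.fn()
			atomic.StoreUint32(&l.done, 1)
		}
		l.mu.Unlock()
	}
	return l.v
}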
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { fs := si.oneofsByName[od.Name()] ft := fs.Type oneofFields := make(map[reflect.Type]*coderFieldInfo) @@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn } } -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { var once sync.Once - var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) }) } @@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { } } -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageInfo, @@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.ProtoReflect(), }) @@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeMessage(m, tagsize, opts) } -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendMessage(b, m, wiretag, opts) } -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeMessage(b, m, wtyp, opts) return v, out, err } -func isInitMessageValue(v pref.Value) error { +func isInitMessageValue(v protoreflect.Value) error { m := v.Message().Interface() return proto.CheckInitialized(m) } @@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ merge: mergeMessageValue, } -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeGroup(m, tagsize, opts) } -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendGroup(b, m, wiretag, opts) } -func 
consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeGroup(b, m, num, wtyp, opts) return v, out, err @@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ merge: mergeMessageValue, } -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.ProtoReflect(), }) @@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageSliceInfo, @@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: asMessage(mp).ProtoReflect(), }) @@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i return n } -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma return b, nil } -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, 
err error) { list := listv.List() if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } -func isInitMessageSliceValue(listv pref.Value) error { +func isInitMessageSliceValue(listv protoreflect.Value) error { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() @@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int return n } -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars return b, nil } -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } @@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := 
opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: asMessage(mp).ProtoReflect(), }) @@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie return out, nil } -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { return m } return legacyWrapMessage(v).Interface() diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63e..f55dc01e3 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func 
consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 
{ return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index c1245fef4..111b9d16f 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapInfo struct { @@ -19,12 +19,12 @@ type mapInfo struct { valWiretag uint64 keyFuncs valueCoderFuncs valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind + keyZero protoreflect.Value + keyKind protoreflect.Kind conv *mapConverter } -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { // TODO: Consider generating specialized map coders. 
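// Aside on the consume*Slice hunks above (codec_gen.go): before decoding a
// packed payload, the new code grows the destination slice exactly once. For
// varint elements it counts bytes whose high bit is clear (each encoded
// varint ends with exactly one such byte); for fixed-width elements it
// divides the payload length by the element size. Both counts as a sketch:
package example

import "google.golang.org/protobuf/encoding/protowire"

// countPackedVarints reports how many varints the packed payload b contains.
func countPackedVarints(b []byte) int {
	count := 0
	for _, v := range b {
		if v < 0x80 { // terminal byte of a varint has its top bit clear
			count++
		}
	}
	return count
}

// countPackedFixed64 reports how many 8-byte values the payload contains.
func countPackedFixed64(b []byte) int {
	return len(b) / protowire.SizeFixed64()
}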
keyField := fd.MapKey() valField := fd.MapValue() @@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage keyKind: keyField.Kind(), conv: conv, } - if valField.Kind() == pref.MessageKind { + if valField.Kind() == protoreflect.MessageKind { valueMessage = getMessageInfo(ft.Elem()) } @@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage }, } switch valField.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: funcs.merge = mergeMapOfMessage - case pref.BytesKind: + case protoreflect.BytesKind: funcs.merge = mergeMapOfBytes default: funcs.merge = mergeMap @@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo err := errUnknown switch num { case genid.MapEntry_Key_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { @@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo key = v n = o.n case genid.MapEntry_Value_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) if err != nil { @@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi err := errUnknown switch num { case 1: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index cd40527ff..6b2fdbb73 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -12,15 +12,15 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // coderMessageInfo contains per-message information used by the fast-path functions. 
// This is a different type from MessageInfo to keep MessageInfo as general-purpose as // possible. type coderMessageInfo struct { - methods piface.Methods + methods protoiface.Methods orderedCoderFields []*coderFieldInfo denseCoderFields []*coderFieldInfo @@ -38,13 +38,13 @@ type coderFieldInfo struct { funcs pointerCoderFuncs // fast-path per-field functions mi *MessageInfo // field's message ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { @@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num }) - var maxDense pref.FieldNumber + var maxDense protoreflect.FieldNumber for _, cf := range mi.orderedCoderFields { if cf.num >= 16 && cf.num >= 2*maxDense { break @@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.needsInitCheck = needsInitCheck(mi.Desc) if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic mi.methods.Marshal = mi.marshal mi.methods.Size = mi.size } if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown mi.methods.Unmarshal = mi.unmarshal } if mi.methods.CheckInitialized == nil { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3ae..145c577bd 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
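// Aside on the //go:build lines this patch adds (such as the hunk that
// follows): Go 1.17 introduced the //go:build syntax, and gofmt keeps it
// paired with the legacy // +build form so older toolchains still apply the
// constraint. In the legacy form a space means OR and a comma means AND, so
// the two lines below are equivalent constraints:
//
//go:build purego || appengine
// +build purego appengine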
+//go:build purego || appengine // +build purego appengine package impl diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index e89971238..576dcf3aa 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // pointerCoderFuncs is a set of pointer encoding functions. @@ -25,83 +25,83 @@ type pointerCoderFuncs struct { // valueCoderFuncs is a set of protoreflect.Value encoding functions. type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value } // fieldCoder returns pointer functions for a field, used for operating on // struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { switch { case fd.IsMap(): return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): // Repeated fields (not packed). 
if ft.Kind() != reflect.Slice { break } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Slice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Slice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Slice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Slice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Slice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Slice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Slice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Slice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Slice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Slice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleSlice } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringSliceValidateUTF8 } @@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringSlice } if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.MessageKind: + case protoreflect.MessageKind: return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: + case protoreflect.GroupKind: return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): // Packed repeated fields. 
// // Only repeated fields of primitive numeric types @@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPackedSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPackedSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32PackedSlice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32PackedSlice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32PackedSlice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64PackedSlice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64PackedSlice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64PackedSlice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32PackedSlice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32PackedSlice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPackedSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64PackedSlice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64PackedSlice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePackedSlice } } - case fd.Kind() == pref.MessageKind: + case fd.Kind() == protoreflect.MessageKind: return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: + case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
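// Aside: the switch below picks "NoZero" coders, which implement implicit
// proto3 presence: a scalar field without explicit presence is written to
// the wire only when it differs from its zero value. The size-side check, as
// a sketch for int32 (function name hypothetical):
package example

import "google.golang.org/protobuf/encoding/protowire"

func sizeInt32NoZero(v int32, tagsize int) int {
	if v == 0 {
		return 0 // zero value: the field is omitted entirely
	}
	return tagsize + protowire.SizeVarint(uint64(v)) // sign-extends negatives
}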
switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolNoZero } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumNoZero } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32NoZero } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32NoZero } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32NoZero } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64NoZero } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64NoZero } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64NoZero } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32NoZero } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32NoZero } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatNoZero } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64NoZero } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64NoZero } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleNoZero } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringNoZeroValidateUTF8 } @@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesNoZero } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringNoZero } @@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer case ft.Kind() == reflect.Ptr: ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPtr } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPtr } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Ptr } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Ptr } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Ptr } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Ptr } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Ptr } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Ptr } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Ptr } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Ptr } - case pref.FloatKind: + case 
protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPtr } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Ptr } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Ptr } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePtr } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringPtrValidateUTF8 } if ft.Kind() == reflect.String { return nil, coderStringPtr } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringPtr } } default: switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBool } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnum } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32 } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32 } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32 } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64 } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64 } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64 } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32 } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32 } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloat } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64 } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64 } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDouble } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringValidateUTF8 } @@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytes } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderString } @@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer // encoderFuncsForValue returns value functions for a field, used for // extension values and map encoding. 
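// Aside: the *ValidateUTF8 coder variants selected in fieldCoder above
// enforce proto3's requirement that string fields hold valid UTF-8
// (strs.EnforceUTF8 decides per field). The check presumably reduces to the
// standard library's validator; a sketch with an illustrative error:
package example

import (
	"fmt"
	"unicode/utf8"
)

func checkUTF8(field, s string) error {
	if !utf8.ValidString(s) {
		return fmt.Errorf("field %s: contains invalid UTF-8", field)
	}
	return nil
}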
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { +func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32SliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32SliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32SliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64SliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64SliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64SliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32SliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32SliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64SliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64SliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleSliceValue - case pref.StringKind: + case protoreflect.StringKind: // We don't have a UTF-8 validating coder for repeated string fields. // Value coders are used for extensions and maps. // Extensions are never proto3, and maps never contain lists. return coderStringSliceValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesSliceValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageSliceValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupSliceValue } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolPackedSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumPackedSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32PackedSliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32PackedSliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32PackedSliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64PackedSliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64PackedSliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64PackedSliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32PackedSliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatPackedSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64PackedSliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoublePackedSliceValue } default: switch fd.Kind() { default: - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolValue - case 
pref.EnumKind: + case protoreflect.EnumKind: return coderEnumValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32Value - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32Value - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32Value - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64Value - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64Value - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64Value - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32Value - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32Value - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64Value - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64Value - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleValue - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { return coderStringValueValidateUTF8 } return coderStringValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupValue } } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert.go index acd61bb50..185ef2efa 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // unwrapper unwraps the value to the underlying value. @@ -20,13 +20,13 @@ type unwrapper interface { // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. type Converter interface { // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value + PBValueOf(reflect.Value) protoreflect.Value // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value + GoValueOf(protoreflect.Value) reflect.Value // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool + IsValidPB(protoreflect.Value) bool // IsValidGo returns whether a reflect.Value is compatible with this type. IsValidGo(reflect.Value) bool @@ -34,12 +34,12 @@ type Converter interface { // New returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns a new mutable value. - New() pref.Value + New() protoreflect.Value // Zero returns a new field value. 
// For scalars, it returns the default value of the field. // For composite types, it returns an immutable, empty value. - Zero() pref.Value + Zero() protoreflect.Value } // NewConverter matches a Go type with a protobuf field and returns a Converter @@ -50,7 +50,7 @@ type Converter interface { // This matcher deliberately supports a wider range of Go types than what // protoc-gen-go historically generated to be able to automatically wrap some // v1 messages generated by other forks of protoc-gen-go. -func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case fd.IsList(): return newListConverter(t, fd) @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( @@ -76,68 +75,68 @@ var ( ) var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) ) -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { // Default isn't defined for repeated fields. 
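// The PBValueOf/GoValueOf pair declared on Converter above lets a named
// Go type (such as a generated enum) round-trip through the
// protoreflect.Value union. A minimal sketch of that round trip using
// only the public API; myEnum is an illustrative stand-in for a
// generated type, not something defined in this package:
//
//	type myEnum int32
//
//	rv := reflect.ValueOf(myEnum(7))
//	pv := protoreflect.ValueOfInt32(int32(rv.Int()))             // PBValueOf direction
//	back := reflect.ValueOf(int32(pv.Int())).Convert(rv.Type()) // GoValueOf direction
//	fmt.Println(back.Interface().(myEnum))                       // prints 7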
return zero } return fd.Default() } switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if t.Kind() == reflect.Bool { return &boolConverter{t, defVal(fd, boolZero)} } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if t.Kind() == reflect.Int32 { return &int32Converter{t, defVal(fd, int32Zero)} } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if t.Kind() == reflect.Int64 { return &int64Converter{t, defVal(fd, int64Zero)} } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if t.Kind() == reflect.Uint32 { return &uint32Converter{t, defVal(fd, uint32Zero)} } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if t.Kind() == reflect.Uint64 { return &uint64Converter{t, defVal(fd, uint64Zero)} } - case pref.FloatKind: + case protoreflect.FloatKind: if t.Kind() == reflect.Float32 { return &float32Converter{t, defVal(fd, float32Zero)} } - case pref.DoubleKind: + case protoreflect.DoubleKind: if t.Kind() == reflect.Float64 { return &float64Converter{t, defVal(fd, float64Zero)} } - case pref.StringKind: + case protoreflect.StringKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &stringConverter{t, defVal(fd, stringZero)} } - case pref.BytesKind: + case protoreflect.BytesKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &bytesConverter{t, defVal(fd, bytesZero)} } - case pref.EnumKind: + case protoreflect.EnumKind: // Handle enums, which must be a named int32 type. 
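// Note the deliberate looseness just above: StringKind and BytesKind
// each accept either a Go string or a []byte, since legacy generators
// disagreed on the representation. The enum case below is stricter:
// only a named type of int32 kind qualifies, which is the shape of
// every generated enum. An illustrative check of that kind test, with
// myEnum standing in for a generated type:
//
//	type myEnum int32
//
//	t := reflect.TypeOf(myEnum(0))
//	fmt.Println(t.Kind() == reflect.Int32) // true: named type, int32 kind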
if t.Kind() == reflect.Int32 { return newEnumConverter(t, fd) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return newMessageConverter(t) } panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) @@ -145,184 +144,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { type boolConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfBool(v.Bool()) + return protoreflect.ValueOfBool(v.Bool()) } -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bool()).Convert(c.goType) } -func (c *boolConverter) IsValidPB(v pref.Value) bool { +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(bool) return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } type int32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt32(int32(v.Int())) + return protoreflect.ValueOfInt32(int32(v.Int())) } -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int32(v.Int())).Convert(c.goType) } -func (c *int32Converter) IsValidPB(v pref.Value) bool { +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int32) return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } type int64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt64(int64(v.Int())) + return protoreflect.ValueOfInt64(int64(v.Int())) } -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int64(v.Int())).Convert(c.goType) } -func (c *int64Converter) IsValidPB(v pref.Value) bool { +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int64) return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } type uint32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint32(uint32(v.Uint())) + return protoreflect.ValueOfUint32(uint32(v.Uint())) } -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) } -func (c *uint32Converter) IsValidPB(v pref.Value) bool { +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint32) return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } type uint64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint64(uint64(v.Uint())) + return protoreflect.ValueOfUint64(uint64(v.Uint())) } -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) } -func (c *uint64Converter) IsValidPB(v pref.Value) bool { +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint64) return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } type float32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat32(float32(v.Float())) + return protoreflect.ValueOfFloat32(float32(v.Float())) } -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float32(v.Float())).Convert(c.goType) } -func (c *float32Converter) IsValidPB(v pref.Value) bool { +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float32) return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } type float64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat64(float64(v.Float())) + return protoreflect.ValueOfFloat64(float64(v.Float())) } -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float64(v.Float())).Convert(c.goType) } -func (c *float64Converter) IsValidPB(v pref.Value) bool { +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float64) return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } type stringConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfString(v.Convert(stringType).String()) + return protoreflect.ValueOfString(v.Convert(stringType).String()) } -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { // pref.Value.String never panics, so we go through an interface // conversion here to check the type. 
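// That is, protoreflect.Value.String stringifies any Value rather than
// panicking on a non-string, so the interface assertion below doubles
// as the type check. The trailing Convert covers string fields whose Go
// representation is a []byte; the conversion itself is ordinary reflect
// behavior (illustrative):
//
//	bs := reflect.ValueOf("abc").Convert(reflect.TypeOf([]byte(nil)))
//	fmt.Printf("%T %v\n", bs.Interface(), bs.Interface()) // []uint8 [97 98 99]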
s := v.Interface().(string) @@ -331,71 +330,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { } return reflect.ValueOf(s).Convert(c.goType) } -func (c *stringConverter) IsValidPB(v pref.Value) bool { +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(string) return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } type bytesConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) } -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bytes()).Convert(c.goType) } -func (c *bytesConverter) IsValidPB(v pref.Value) bool { +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().([]byte) return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } type enumConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) } else { def = fd.Default() } return &enumConverter{goType, def} } -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) } -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Enum()).Convert(c.goType) } -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) return ok } @@ -403,11 +402,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { return 
v.IsValid() && v.Type() == c.goType } -func (c *enumConverter) New() pref.Value { +func (c *enumConverter) New() protoreflect.Value { return c.def } -func (c *enumConverter) Zero() pref.Value { +func (c *enumConverter) Zero() protoreflect.Value { return c.def } @@ -419,7 +418,7 @@ func newMessageConverter(goType reflect.Type) Converter { return &messageConverter{goType} } -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } @@ -430,13 +429,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { v = reflect.Zero(reflect.PtrTo(v.Type())) } } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) } - return pref.ValueOfMessage(legacyWrapMessage(v)) + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) } -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -460,7 +459,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { return rv } -func (c *messageConverter) IsValidPB(v pref.Value) bool { +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -478,14 +477,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *messageConverter) New() pref.Value { +func (c *messageConverter) New() protoreflect.Value { if c.isNonPointer() { return c.PBValueOf(reflect.New(c.goType).Elem()) } return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *messageConverter) Zero() pref.Value { +func (c *messageConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 6fccab520..f89136516 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -8,10 +8,10 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} @@ -26,16 +26,16 @@ type listConverter struct { c Converter } -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } pv := reflect.New(c.goType) pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) } -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { rv := 
v.List().(*listReflect).v if rv.IsNil() { return reflect.Zero(c.goType) @@ -43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { return rv.Elem() } -func (c *listConverter) IsValidPB(v pref.Value) bool { +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) } -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) } type listPtrConverter struct { @@ -68,18 +68,18 @@ type listPtrConverter struct { c Converter } -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfList(&listReflect{v, c.c}) + return protoreflect.ValueOfList(&listReflect{v, c.c}) } -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.List().(*listReflect).v } -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listPtrConverter) New() pref.Value { +func (c *listPtrConverter) New() protoreflect.Value { return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *listPtrConverter) Zero() pref.Value { +func (c *listPtrConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { } return ls.v.Elem().Len() } -func (ls *listReflect) Get(i int) pref.Value { +func (ls *listReflect) Get(i int) protoreflect.Value { return ls.conv.PBValueOf(ls.v.Elem().Index(i)) } -func (ls *listReflect) Set(i int, v pref.Value) { +func (ls *listReflect) Set(i int, v protoreflect.Value) { ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) } -func (ls *listReflect) Append(v pref.Value) { +func (ls *listReflect) Append(v protoreflect.Value) { ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) } -func (ls *listReflect) AppendMutable() pref.Value { +func (ls *listReflect) AppendMutable() protoreflect.Value { if _, ok := ls.conv.(*messageConverter); !ok { panic("invalid AppendMutable on list with non-message type") } @@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { func (ls *listReflect) Truncate(i int) { ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) } -func (ls *listReflect) NewElement() pref.Value { +func (ls *listReflect) NewElement() protoreflect.Value { return ls.conv.New() } func (ls *listReflect) IsValid() bool { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 
de06b2593..f30b0a057 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapConverter struct { @@ -16,7 +16,7 @@ type mapConverter struct { keyConv, valConv Converter } -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { if t.Kind() != reflect.Map { panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } @@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { } } -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) } -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.Map().(*mapReflect).v } -func (c *mapConverter) IsValidPB(v pref.Value) bool { +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { mapv, ok := v.Interface().(*mapReflect) if !ok { return false @@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *mapConverter) New() pref.Value { +func (c *mapConverter) New() protoreflect.Value { return c.PBValueOf(reflect.MakeMap(c.goType)) } -func (c *mapConverter) Zero() pref.Value { +func (c *mapConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -67,29 +67,29 @@ type mapReflect struct { func (ms *mapReflect) Len() int { return ms.v.Len() } -func (ms *mapReflect) Has(k pref.MapKey) bool { +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) return rv.IsValid() } -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) if !rv.IsValid() { - return pref.Value{} + return protoreflect.Value{} } return ms.valConv.PBValueOf(rv) } -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.valConv.GoValueOf(v) ms.v.SetMapIndex(rk, rv) } -func (ms *mapReflect) Clear(k pref.MapKey) { +func (ms *mapReflect) Clear(k protoreflect.MapKey) { rk := ms.keyConv.GoValueOf(k.Value()) ms.v.SetMapIndex(rk, reflect.Value{}) } -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { if _, ok := ms.valConv.(*messageConverter); !ok { panic("invalid Mutable on map with non-message value type") } @@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { } return v } -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { iter := mapRange(ms.v) for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() @@ -110,7 +110,7 @@ func (ms 
*mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
 		}
 	}
 }
-func (ms *mapReflect) NewValue() pref.Value {
+func (ms *mapReflect) NewValue() protoreflect.Value {
 	return ms.valConv.New()
 }
 func (ms *mapReflect) IsValid() bool {
diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/decode.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/decode.go
index 949dc49a6..cda0520c2 100644
--- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -12,12 +12,12 @@ import (
 	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	preg "google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/runtime/protoiface"
-	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
 var errDecode = errors.New("cannot parse invalid wire-format data")
+var errRecursionDepth = errors.New("exceeded maximum recursion depth")
 
 type unmarshalOptions struct {
 	flags    protoiface.UnmarshalInputFlags
@@ -25,6 +25,7 @@
 		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
 		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
 	}
+	depth int
 }
 
 func (o unmarshalOptions) Options() proto.UnmarshalOptions {
@@ -36,14 +37,17 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 	}
 }
 
-func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 }
+func (o unmarshalOptions) DiscardUnknown() bool {
+	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
+}
 
 func (o unmarshalOptions) IsDefault() bool {
-	return o.flags == 0 && o.resolver == preg.GlobalTypes
+	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
 }
 
 var lazyUnmarshalOptions = unmarshalOptions{
-	resolver: preg.GlobalTypes,
+	resolver: protoregistry.GlobalTypes,
+	depth:    protowire.DefaultRecursionLimit,
 }
 
 type unmarshalOutput struct {
@@ -52,7 +56,7 @@ type unmarshalOutput struct {
 }
 
 // unmarshal is protoreflect.Methods.Unmarshal.
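// The new depth field threads a recursion budget through unmarshalPointer
// below: each nested message or group costs one unit, and a negative
// budget fails with errRecursionDepth rather than risking a stack
// overflow on pathologically nested input. The guard in isolation (a
// minimal sketch; unmarshalNested and its signature are illustrative,
// not this package's internal API):
//
//	func unmarshalNested(b []byte, depth int) error {
//		depth--
//		if depth < 0 {
//			return errors.New("exceeded maximum recursion depth")
//		}
//		// ... decode one level, passing depth down to the recursive
//		// call made for every embedded message or group ...
//		return nil
//	}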
-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() @@ -62,12 +66,13 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) - var flags piface.UnmarshalOutputFlags + var flags protoiface.UnmarshalOutputFlags if out.initialized { - flags |= piface.UnmarshalInitialized + flags |= protoiface.UnmarshalInitialized } - return piface.UnmarshalOutput{ + return protoiface.UnmarshalOutput{ Flags: flags, }, err } @@ -82,6 +87,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } @@ -202,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p var err error xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) if err != nil { - if err == preg.NotFound { + if err == protoregistry.NotFound { return out, errUnknown } return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/enum.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/enum.go index 8c1eab4bf..5f3ef5ad7 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -7,15 +7,15 @@ package impl import ( "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type EnumInfo struct { GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor + Desc protoreflect.EnumDescriptor } -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) } -func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/extension.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/extension.go index e904fd993..cb25b0bae 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // ExtensionInfo implements ExtensionType. @@ -45,7 +45,7 @@ type ExtensionInfo struct { // since the message may no longer implement the MessageV1 interface. // // Deprecated: Use the ExtendedType method instead. 
- ExtendedType piface.MessageV1 + ExtendedType protoiface.MessageV1 // ExtensionType is the zero value of the extension type. // @@ -83,31 +83,31 @@ const ( extensionInfoFullInit = 2 ) -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { xi.goType = goType xi.desc = extensionTypeDescriptor{xd, xi} xi.init = extensionInfoDescInit } -func (xi *ExtensionInfo) New() pref.Value { +func (xi *ExtensionInfo) New() protoreflect.Value { return xi.lazyInit().New() } -func (xi *ExtensionInfo) Zero() pref.Value { +func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { xi.lazyInitSlow() } @@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { } type extensionTypeDescriptor struct { - pref.ExtensionDescriptor + protoreflect.ExtensionDescriptor xi *ExtensionInfo } -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { return xtd.xi } -func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { return xtd.ExtensionDescriptor } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index f7d7ffb51..c2a803bb2 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -13,13 +13,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) // legacyEnumName returns the name of enums used in legacy code. // It is neither the protobuf full name nor the qualified Go name, // but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { +func legacyEnumName(ed protoreflect.EnumDescriptor) string { var protoPkg string enumName := string(ed.FullName()) if fd := ed.ParentFile(); fd != nil { @@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { // legacyWrapEnum wraps v as a protoreflect.Enum, // where v must be a int32 kind and not implement the v2 API already. 
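// The wrapping below relies on every generated enum being a named type
// of int32 kind, so reflect can move a value freely between the
// concrete type and protoreflect.EnumNumber. An illustrative round
// trip, with myEnum standing in for a legacy generated type:
//
//	type myEnum int32
//
//	n := protoreflect.EnumNumber(reflect.ValueOf(myEnum(3)).Int())
//	back := reflect.ValueOf(n).Convert(reflect.TypeOf(myEnum(0)))
//	fmt.Println(back.Interface().(myEnum)) // prints 3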
-func legacyWrapEnum(v reflect.Value) pref.Enum { +func legacyWrapEnum(v reflect.Value) protoreflect.Enum { et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) + return et.New(protoreflect.EnumNumber(v.Int())) } var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, // where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { // Fast-path: check if a EnumType is cached for this concrete type. if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } // Slow-path: derive enum descriptor and initialize EnumType. - var et pref.EnumType + var et protoreflect.EnumType ed := LegacyLoadEnumDesc(t) et = &legacyEnumType{ desc: ed, goType: t, } if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } return et } type legacyEnumType struct { - desc pref.EnumDescriptor + desc protoreflect.EnumDescriptor goType reflect.Type m sync.Map // map[protoreflect.EnumNumber]proto.Enum } -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) + return e.(protoreflect.Enum) } e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} t.m.Store(n, e) return e } -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { return t.desc } type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType goTyp reflect.Type } -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { return e.pbTyp.Descriptor() } -func (e *legacyEnumWrapper) Type() pref.EnumType { +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { return e.pbTyp } -func (e *legacyEnumWrapper) Number() pref.EnumNumber { +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { return e.num } -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } func (e *legacyEnumWrapper) protoUnwrap() interface{} { @@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { } var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) ) var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor @@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor // which must be an int32 kind and not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: initialize EnumDescriptor from the raw descriptor. 
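// The caches in this file all follow the usual sync.Map idiom: an
// optimistic Load on the fast path, construction on a miss, then
// LoadOrStore so racing goroutines converge on one canonical entry and
// any duplicate is discarded. The idiom in isolation (newDescriptor is
// an illustrative stand-in, not this package's API):
//
//	var cache sync.Map // map[reflect.Type]interface{}
//
//	func load(t reflect.Type) interface{} {
//		if v, ok := cache.Load(t); ok {
//			return v // fast path
//		}
//		v := newDescriptor(t) // several goroutines may race here
//		if v2, ok := cache.LoadOrStore(t, v); ok {
//			return v2 // another goroutine stored first; defer to it
//		}
//		return v
//	}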
ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { + if _, ok := ev.(protoreflect.Enum); ok { panic(fmt.Sprintf("%v already implements proto.Enum", t)) } edV1, ok := ev.(enumV1) @@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { } b, idxs := edV1.EnumDescriptor() - var ed pref.EnumDescriptor + var ed protoreflect.EnumDescriptor if len(idxs) == 1 { ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) } else { @@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript // We are unable to use the global enum registry since it is // unfortunately keyed by the protobuf full name, which we also do not know. // Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: construct a bogus, but unique EnumDescriptor. @@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // An exhaustive query is clearly impractical, but can be best-effort. if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } return ed } @@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // It should be sufficiently unique within a program. // // This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { sanitize := func(r rune) rune { switch { case r == '/': @@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { ss[i] = "x" + s } } - return pref.FullName(strings.Join(ss, ".")) + return protoreflect.FullName(strings.Join(ss, ".")) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index e3fb0b578..9b64ad5bb 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -12,21 +12,21 @@ import ( "reflect" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // These functions exist to support exported APIs in generated protobufs. // While these are deprecated, they cannot be removed for compatibility reasons. // LegacyEnumName returns the name of enums used in legacy code. -func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { +func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { return legacyEnumName(ed) } // LegacyMessageTypeOf returns the protoreflect.MessageType for m, // with name used as the message name if necessary. 
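// AberrantDeriveFullName in legacy_enum.go above maps a Go import path
// plus type name to a dotted protobuf full name: '/' becomes '.', runes
// that are not legal in identifiers become '_', and segments that would
// still not be identifiers get an "x" prefix. So a type pkg.MyMessage
// from github.com/user/repo would come out roughly as (illustrative):
//
//	name := protoreflect.FullName("github_com.user.repo.pkg.MyMessage")
//	fmt.Println(name.IsValid()) // true: every segment is an identifier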
-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } @@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. // The input can either be a string representing the enum value by name, // or a number representing the enum number itself. -func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { if b[0] == '"' { - var name pref.Name + var name protoreflect.Name if err := json.Unmarshal(b, &name); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb } return ev.Number(), nil } else { - var num pref.EnumNumber + var num protoreflect.EnumNumber if err := json.Unmarshal(b, &num); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. blockSize = len(in) } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) out = append(out, blockHeader[:]...) out = append(out, in[:blockSize]...) in = in[blockSize:] diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 49e723161..87b30d050 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -12,16 +12,16 @@ import ( ptag "google.golang.org/protobuf/internal/encoding/tag" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) func (xi *ExtensionInfo) initToLegacy() { xd := xi.desc - var parent piface.MessageV1 + var parent protoiface.MessageV1 messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { // Create a new parent message and unwrap it if possible. mv := mt.New().Interface() t := reflect.TypeOf(mv) @@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Check whether the message implements the legacy v1 Message interface. mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { + if mz, ok := mz.(protoiface.MessageV1); ok { parent = mz } } @@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Reconstruct the legacy enum full name. 
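// One note on the CompressGZIP hunk in legacy_export.go above: the
// rewrite is a pure simplification. For a uint16 block size,
// size^0x0000 is just size, and size^0xffff equals ^size, the one's
// complement that RFC 1951 section 3.2.4 requires in the NLEN slot of
// a stored (uncompressed) block. A quick check:
//
//	var blockSize uint16 = 0x1234
//	fmt.Printf("%#04x %#04x\n", blockSize^0xffff, ^blockSize) // 0xedcb 0xedcb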
var enumName string - if xd.Kind() == pref.EnumKind { + if xd.Kind() == protoreflect.EnumKind { enumName = legacyEnumName(xd.Enum()) } @@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { // field number is specified. In such a case, use a placeholder. if xi.ExtendedType == nil || xi.ExtensionType == nil { xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), } xi.desc = extensionTypeDescriptor{xd, xi} return } // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor t := reflect.TypeOf(xi.ExtensionType) isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 @@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { t = t.Elem() } switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: ed = v.Descriptor() case enumV1: ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: md = v.ProtoReflect().Descriptor() case messageV1: md = LegacyLoadMessageDesc(t) } // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors + var evs protoreflect.EnumValueDescriptors if ed != nil { evs = ed.Values() } @@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { // Construct a v2 ExtensionType. xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind xd.L2.IsPacked = fd.L1.IsPacked @@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { } type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber + name protoreflect.FullName + number protoreflect.FieldNumber } -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) 
IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go 
b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 029feeefd..2ab2c6297 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -16,14 +16,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { +func legacyWrapMessage(v reflect.Value) protoreflect.Message { t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} @@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { // legacyLoadMessageType dynamically loads a protoreflect.Type for t, // where t must be not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessageType{t} } @@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, // where t must be a *struct kind and not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { // Fast-path: check if a MessageInfo is cached for this concrete type. if mt, ok := legacyMessageTypeCache.Load(t); ok { return mt.(*MessageInfo) @@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic } if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal @@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc // which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { return legacyLoadMessageDesc(t, "") } -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) + return mi.(protoreflect.MessageDescriptor) } // Slow-path: initialize MessageDescriptor from the raw descriptor. 
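// Further down, aberrantLoadMessageDescReentrant stops ranging over a
// []string of method names and instead calls t.MethodByName twice with
// string literals. The motivation appears to be linker friendliness:
// the Go linker must retain every method of every type when it sees
// reflect's MethodByName called with a non-constant name, whereas
// literal arguments keep dead-code elimination viable. The shape of
// the rewrite (illustrative fragment):
//
//	methods := make([]reflect.Method, 0, 2)
//	if m, ok := t.MethodByName("XXX_OneofFuncs"); ok {
//		methods = append(methods, m)
//	}
//	if m, ok := t.MethodByName("XXX_OneofWrappers"); ok {
//		methods = append(methods, m)
//	}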
mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { + if _, ok := mv.(protoreflect.ProtoMessage); ok { panic(fmt.Sprintf("%v already implements proto.Message", t)) } mdV1, ok := mv.(messageV1) @@ -164,7 +162,7 @@ var ( // // This is a best-effort derivation of the message descriptor using the protobuf // tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { aberrantMessageDescLock.Lock() defer aberrantMessageDescLock.Unlock() if aberrantMessageDescCache == nil { @@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes } return aberrantLoadMessageDescReentrant(t, name) } -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if md, ok := aberrantMessageDescCache[t]; ok { return md @@ -208,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } @@ -225,9 +228,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] for i := 0; i < vs.Len(); i++ { v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), }) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) } @@ -245,7 +248,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M n := len(md.L2.Oneofs.List) md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile od.L0.Parent = md od.L0.Index = n @@ -267,14 +270,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M return md } -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName)
protoreflect.FullName { if name.IsValid() { return name } func() { defer func() { recover() }() // swallow possible nil panics if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) + name = protoreflect.FullName(m.XXX_MessageName()) } }() if name.IsValid() { @@ -305,7 +308,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Index = n if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { + fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) @@ -318,17 +321,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, } // Populate Enum and Message. - if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: fd.L1.Enum = v.Descriptor() default: fd.L1.Enum = LegacyLoadEnumDesc(t) } } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: + case protoreflect.ProtoMessage: fd.L1.Message = v.ProtoReflect().Descriptor() case messageV1: fd.L1.Message = LegacyLoadMessageDesc(t) @@ -337,13 +340,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, n := len(md.L1.Messages.List) md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { + md2.L2.Options = func() protoreflect.ProtoMessage { opts := descopts.Message.ProtoReflect().New() opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) return opts.Interface() @@ -364,8 +367,8 @@ type placeholderEnumValues struct { protoreflect.EnumValueDescriptors } -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) } // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. @@ -383,7 +386,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var aberrantProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &protoiface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -392,40 +395,40 @@ var aberrantProtoMethods = &piface.Methods{ // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. 
- Flags: piface.SupportMarshalDeterministic, + Flags: protoiface.SupportMarshalDeterministic, } -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() marshaler, ok := v.(legacyMarshaler) if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) } out, err := marshaler.Marshal() if in.Buf != nil { out = append(in.Buf, out...) } - return piface.MarshalOutput{ + return protoiface.MarshalOutput{ Buf: out, }, err } -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } -func legacyMerge(in piface.MergeInput) piface.MergeOutput { +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) if ok { merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // If legacy merger is unavailable, implement merge in terms of @@ -433,29 +436,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { srcv := in.Source.(unwrapper).protoUnwrap() marshaler, ok := srcv.(legacyMarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } dstv = in.Destination.(unwrapper).protoUnwrap() unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } if !in.Source.IsValid() { // Legacy Marshal methods may not function on nil messages. // Check for a typed nil source only after we confirm that // legacy Marshal/Unmarshal methods are present, for // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } b, err := marshaler.Marshal() if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } err = unmarshaler.Unmarshal(b) if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
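The legacyMerge rewrite above keeps the v1 fallback semantics: when the destination offers no Merge method, the source is marshaled and the bytes are unmarshaled into the destination, which works because concatenated protobuf wire data decodes as a merge. A minimal sketch of that round trip, with toy marshaler/unmarshaler interfaces standing in for the patch's legacyMarshaler and legacyUnmarshaler (all names here are illustrative, not part of the patch):

```go
package main

import "fmt"

// marshaler and unmarshaler stand in for legacyMarshaler/legacyUnmarshaler.
type marshaler interface{ Marshal() ([]byte, error) }
type unmarshaler interface{ Unmarshal([]byte) error }

// mergeByRoundTrip merges src into dst by serializing src and feeding the
// bytes to dst, the same fallback legacyMerge uses when no Merge method exists.
func mergeByRoundTrip(dst unmarshaler, src marshaler) error {
	b, err := src.Marshal()
	if err != nil {
		return err
	}
	return dst.Unmarshal(b)
}

// blob is a toy message whose wire format is its raw bytes, so unmarshaling
// appends, which is exactly the merge-by-concatenation property relied on.
type blob []byte

func (b blob) Marshal() ([]byte, error)  { return b, nil }
func (b *blob) Unmarshal(p []byte) error { *b = append(*b, p...); return nil }

func main() {
	dst := blob("hello ")
	if err := mergeByRoundTrip(&dst, blob("world")); err != nil {
		panic(err)
	}
	fmt.Println(string(dst)) // hello world
}
```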
@@ -463,19 +466,19 @@ type aberrantMessageType struct { t reflect.Type } -func (mt aberrantMessageType) New() pref.Message { +func (mt aberrantMessageType) New() protoreflect.Message { if mt.t.Kind() == reflect.Ptr { return aberrantMessage{reflect.New(mt.t.Elem())} } return aberrantMessage{reflect.Zero(mt.t)} } -func (mt aberrantMessageType) Zero() pref.Message { +func (mt aberrantMessageType) Zero() protoreflect.Message { return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) GoType() reflect.Type { return mt.t } -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(mt.t) } @@ -499,56 +502,56 @@ func (m aberrantMessage) Reset() { } } -func (m aberrantMessage) ProtoReflect() pref.Message { +func (m aberrantMessage) ProtoReflect() protoreflect.Message { return m } -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(m.v.Type()) } -func (m aberrantMessage) Type() pref.MessageType { +func (m aberrantMessage) Type() protoreflect.MessageType { return aberrantMessageType{m.v.Type()} } -func (m aberrantMessage) New() pref.Message { +func (m aberrantMessage) New() protoreflect.Message { if m.v.Type().Kind() == reflect.Ptr { return aberrantMessage{reflect.New(m.v.Type().Elem())} } return aberrantMessage{reflect.Zero(m.v.Type())} } -func (m aberrantMessage) Interface() pref.ProtoMessage { +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { return m } -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { return } -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { return false } -func (m aberrantMessage) Clear(pref.FieldDescriptor) { +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { if fd.Default().IsValid() { return fd.Default() } panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) GetUnknown() pref.RawFields { +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { return nil } -func (m aberrantMessage) 
SetUnknown(pref.RawFields) { +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { @@ -557,7 +560,7 @@ func (m aberrantMessage) IsValid() bool { } return false } -func (m aberrantMessage) ProtoMethods() *piface.Methods { +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/merge.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/merge.go index c65bbc044..7e65f64f2 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -9,8 +9,8 @@ import ( "reflect" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) type mergeOptions struct{} @@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { } // merge is protoreflect.Methods.Merge. -func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { +func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { dp, ok := mi.getPointer(in.Destination) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } sp, ok := mi.getPointer(in.Source) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { @@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { continue } dx := (*dext)[num] - var dv pref.Value + var dv protoreflect.Value if dx.Type() == sx.Type() { dv = dx.Value() } @@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { return src } -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) } -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { @@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { return dst } -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sb := srcl.Get(i).Bytes() db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) + dstl.Append(protoreflect.ValueOfBytes(db)) } return dst } -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sm := srcl.Get(i).Message() dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) + dstl.Append(protoreflect.ValueOfMessage(dm)) } return dst } -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { opts.Merge(dst.Message().Interface(), src.Message().Interface()) return dst } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message.go index a104e28e8..629bacdce 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,8 +14,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -29,7 +28,7 @@ type MessageInfo struct { GoReflectType reflect.Type // pointer to struct // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor + Desc protoreflect.MessageDescriptor // Exporter must be provided in a purego environment in order to provide // access to unexported fields. @@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} // is generated by our implementation of protoc-gen-go (for v2 and on). // If it is unable to obtain a MessageInfo, it returns nil. func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) if !ok { return nil } @@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { // getPointer returns the pointer for a message, which should be of // the type of the MessageInfo. If the message is of a different type, // it returns ok==false. 
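The merge entry point above returns an empty protoiface.MergeOutput whenever getPointer fails, and getPointer just below reports ok==false for any message not built by this MessageInfo; the caller treats that as "not merged" and falls back to a generic path. A small sketch of that ownership contract, using toy types rather than the real implementation:

```go
package main

import "fmt"

// info plays the role of MessageInfo: each wrapper records which info built
// it, and unwrap succeeds only for wrappers owned by the same info, mirroring
// the getPointer contract. All names here are illustrative.
type info struct{ name string }

type wrapper struct {
	owner *info
	data  *int
}

func (mi *info) unwrap(w wrapper) (p *int, ok bool) {
	if w.owner != mi {
		return nil, false // foreign implementation: caller must fall back
	}
	return w.data, true
}

func main() {
	a, b := &info{"a"}, &info{"b"}
	v := 42
	w := wrapper{owner: a, data: &v}
	if p, ok := a.unwrap(w); ok {
		fmt.Println("owned:", *p)
	}
	if _, ok := b.unwrap(w); !ok {
		fmt.Println("foreign wrapper rejected; merge reports not-done")
	}
}
```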
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { +func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { switch m := m.(type) { case *messageState: return m.pointer(), m.messageInfo() == mi @@ -134,10 +133,10 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type } func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { @@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { unknownOffset: invalidOffset, extensionOffset: invalidOffset, - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, } fieldLoop: @@ -180,12 +179,12 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f continue fieldLoop } } if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f + si.oneofsByName[protoreflect.Name(s)] = f continue fieldLoop } } @@ -193,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. 
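The fieldLoop in makeStructInfo above recovers field numbers from the `protobuf:"..."` struct tags: split the tag on commas and take the first element made up purely of digits (the oneof-wrapper derivation continues below). A self-contained sketch of that tag parse:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// fieldNumber extracts the protobuf field number from a `protobuf:"..."`
// struct tag the same way the fieldLoop does: split on commas and take the
// first purely numeric element.
func fieldNumber(f reflect.StructField) (uint64, bool) {
	for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
		if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
			n, _ := strconv.ParseUint(s, 10, 64)
			return n, true
		}
	}
	return 0, false
}

// example is a hand-written stand-in for a generated message struct.
type example struct {
	Name string `protobuf:"bytes,1,opt,name=name"`
	ID   int64  `protobuf:"varint,2,opt,name=id"`
}

func main() {
	t := reflect.TypeOf(example{})
	for i := 0; i < t.NumField(); i++ {
		if n, ok := fieldNumber(t.Field(i)); ok {
			fmt.Printf("%s -> field %d\n", t.Field(i).Name, n)
		}
	}
}
```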
oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } @@ -208,8 +212,8 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf break } } @@ -219,7 +223,11 @@ fieldLoop: } func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) } func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) @@ -237,7 +245,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { fd := mi.Desc.Fields().Get(i) switch { case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 9488b7261..d9ea010be 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -10,17 +10,17 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -30,8 +30,8 @@ type reflectMessageInfo struct { // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. 
rangeInfos []interface{} // either *fieldInfo or *oneofInfo - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) extensionMap func(pointer) *extensionMap nilMessage atomicNilMessage @@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { // This code assumes that the struct is well-formed and panics if there are // any discrepancies. func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} md := mi.Desc fds := md.Fields() for i := 0; i < fds.Len(); i++ { @@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.fields[fd.Number()] = &fi } - mi.oneofs = map[pref.Name]*oneofInfo{} + mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) @@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { switch { case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } return *p.Apply(mi.unknownOffset).Bytes() } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } @@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } return **bp } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { **bp = b } default: - mi.getUnknown = func(pointer) pref.RawFields { + mi.getUnknown = func(pointer) protoreflect.RawFields { return nil } - mi.setUnknown = func(p pointer, _ pref.RawFields) { + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { type extensionMap map[int32]ExtensionField -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { if m != nil { for _, x := range *m { xd := x.Type().TypeDescriptor() @@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { } } } -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } @@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { } return true } -func (m *extensionMap) Clear(xt pref.ExtensionType) { +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { @@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { } return xt.Zero() } -func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { xd := xt.TypeDescriptor() isValid := true switch { @@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { @@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // in an allocation-free way without needing to have a shadow Go type generated // for every message type. This technique only works using unsafe. 
// -// // Example generated code: // // type M struct { @@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // It has access to the message info as its first field, and a pointer to the // MessageState is identical to a pointer to the concrete message value. // -// // Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. type MessageState struct { pragma.NoUnkeyedLiterals pragma.DoNotCompare @@ -368,8 +366,8 @@ type MessageState struct { type messageState MessageState var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) ) // messageDataType is a tuple of a pointer to the message data and @@ -387,16 +385,16 @@ type ( ) var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) ) // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } } -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } func (m *messageIfaceWrapper) protoUnwrap() interface{} { @@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
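The requirements list rewritten above is what makes the MessageState scheme work: the state must be the first field of the generated struct, so a pointer to the message and a pointer to its state field are the same address. A toy demonstration of that aliasing, assuming nothing beyond first-field layout (types here are illustrative, not the real MessageState):

```go
package main

import (
	"fmt"
	"unsafe"
)

// state stands in for impl.MessageState in this sketch.
type state struct{ info unsafe.Pointer }

// M mimics a generated message: state is the FIRST field, so *M and a
// pointer to m.state alias the same address, which is the requirement the
// doc comment above spells out.
type M struct {
	state state
	Field int
}

func main() {
	m := new(M)
	fmt.Println(unsafe.Pointer(m) == unsafe.Pointer(&m.state)) // true
}
```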
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext if !mi.Desc.ExtensionRanges().Has(fd.Number()) { panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 343cf8721..5e736c60e 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -11,24 +11,24 @@ import ( "sync" "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { - fieldDesc pref.FieldDescriptor + fieldDesc protoreflect.FieldDescriptor // These fields are used for protobuf reflection support. has func(pointer) bool clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value } -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { // This never occurs for generated message types. // It implies that a hand-crafted type has missing Go fields // for specific protobuf message fields. 
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { clear: func(p pointer) { panic("missing Go struct field for " + string(fd.FullName())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { return fd.Default() }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { panic("missing Go struct field for " + string(fd.FullName())) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { panic("missing Go struct field for " + string(fd.FullName())) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { panic("missing Go struct field for " + string(fd.FullName())) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { if v := fd.Default(); v.IsValid() { return v } @@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { } } -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) @@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { rv.Set(reflect.New(ot)) @@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) rv.Set(conv.GoValueOf(v)) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { if !isMessage { panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) } @@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv = rv.Elem().Elem().Field(0) if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) @@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -176,7 +176,7 @@ func 
fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } rv.Set(pv) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if v.IsNil() { v.Set(reflect.MakeMap(fs.Type)) } return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Slice { panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) @@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } rv.Set(pv.Elem()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type) return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } @@ -252,7 +252,7 @@ var ( emptyBytes = reflect.ValueOf([]byte{}) ) -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 @@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { if rv.IsNil() { @@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } } }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { if !flags.ProtoLegacy { panic("no support for proto1 weak fields") } var once sync.Once - 
var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) if messageType == nil { panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) } @@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn clear: func(p pointer) { p.Apply(weakOffset).WeakFields().clear(num) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { lazyInit() if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } m, ok := p.Apply(weakOffset).WeakFields().get(num) if !ok { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { lazyInit() m := v.Message() if m.Descriptor() != messageType.Descriptor() { @@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn } p.Apply(weakOffset).WeakFields().set(num, m.Interface()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { lazyInit() fs := p.Apply(weakOffset).WeakFields() m, ok := fs.get(num) @@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn m = messageType.New().Interface() fs.set(num, m) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { lazyInit() return messageType.New() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { lazyInit() - return pref.ValueOfMessage(messageType.New()) + return protoreflect.ValueOfMessage(messageType.New()) }, } } -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) @@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } type oneofInfo struct { - 
oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } @@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } else { fs := si.oneofsByName[od.Name()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821e..517e94434 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl @@ -158,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. + in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85..4b020e311 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
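Both pointer_reflect.go above and pointer_unsafe.go below gain a //go:build line next to the legacy // +build comment; Go 1.17+ toolchains read the former, older ones the latter, and the two must select the same files. A sketch of the constraint-pair header this patch standardizes on (package and function here are illustrative):

```go
//go:build !purego && !appengine
// +build !purego,!appengine

// The //go:build line is authoritative for Go 1.17+, while the legacy
// // +build line keeps pre-1.17 toolchains selecting the same files;
// gofmt keeps the two in sync automatically.
package impl

// usesUnsafe reports which build flavor compiled in; a purego/appengine
// twin file would define the same function returning false.
func usesUnsafe() bool { return true }
```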
+//go:build !purego && !appengine // +build !purego,!appengine package impl @@ -137,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/validate.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/validate.go index 08cfb6054..a24e6bbd7 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -16,9 +16,9 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) // ValidationStatus is the result of validating the wire-format encoding of a message. @@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { // of the message type. // // This function is exposed for testing. 
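The grow*Slice helpers added above all reduce to the same step: allocate len+addCap capacity, copy the existing elements, and swap in the new backing array; the unsigned and float variants delegate to the same-width signed grower because only the element size matters to the allocation. A standalone sketch of the pattern:

```go
package main

import "fmt"

// growInt32Slice mirrors the helper added in pointer_unsafe.go: extend
// capacity by addCap while preserving the existing elements and length.
func growInt32Slice(sp *[]int32, addCap int) {
	s := make([]int32, len(*sp), len(*sp)+addCap)
	copy(s, *sp)
	*sp = s
}

func main() {
	s := []int32{1, 2, 3}
	growInt32Slice(&s, 5)
	fmt.Println(len(s), cap(s)) // 3 8
}
```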
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { mi, ok := mt.(*MessageInfo) if !ok { return out, ValidationUnknown } if in.Resolver == nil { - in.Resolver = preg.GlobalTypes + in.Resolver = protoregistry.GlobalTypes } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, }) if o.initialized { - out.Flags |= piface.UnmarshalInitialized + out.Flags |= protoiface.UnmarshalInitialized } return out, st } @@ -106,22 +106,22 @@ const ( validationTypeMessageSetItem ) -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String } @@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip default: vi = newValidationInfo(fd, ft) } - if fd.Cardinality() == pref.Required { + if fd.Cardinality() == protoreflect.Required { // Avoid overflow. 
The required field check is done with a 64-bit mask, with // any message containing more than 64 required fields always reported as // potentially uninitialized, so it is not important to get a precise count @@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip return vi } -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.IsList(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo case fd.IsMap(): vi.typ = validationTypeMap switch fd.MapKey().Kind() { - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.keyType = validationTypeUTF8String } } switch fd.MapValue().Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.valType = validationTypeMessage if ft.Kind() == reflect.Map { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.valType = validationTypeUTF8String } } default: switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if !fd.IsWeak() { vi.mi = getMessageInfo(ft) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -314,11 +314,11 @@ State: break } messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) switch err { case nil: vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: + case protoregistry.NotFound: vi.typ = validationTypeBytes default: return out, ValidationUnknown @@ -335,7 +335,7 @@ State: // unmarshaling to begin failing. Supporting this requires some way to // determine if the resolver is frozen. 
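The validation hunks above repeatedly switch on protoreflect.Kind: message and group kinds recurse into a sub-MessageInfo, string kinds get UTF-8 checking only when strs.EnforceUTF8 says so, and everything else is validated as opaque bytes. A condensed sketch of that classification (the validationType values here are illustrative; the real enum is unexported):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// validationType is a toy mirror of the internal enum in validate.go.
type validationType int

const (
	validationTypeBytes validationType = iota
	validationTypeUTF8String
	validationTypeMessage
	validationTypeGroup
)

// classify shows the shape of the Kind switches above: recurse for messages
// and groups, enforce UTF-8 for strings only when required, else raw bytes.
func classify(k protoreflect.Kind, enforceUTF8 bool) validationType {
	switch k {
	case protoreflect.MessageKind:
		return validationTypeMessage
	case protoreflect.GroupKind:
		return validationTypeGroup
	case protoreflect.StringKind:
		if enforceUTF8 {
			return validationTypeUTF8String
		}
	}
	return validationTypeBytes
}

func main() {
	fmt.Println(classify(protoreflect.StringKind, true))   // 1 (UTF-8 string)
	fmt.Println(classify(protoreflect.MessageKind, false)) // 2 (message)
}
```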
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { + if err != nil && err != protoregistry.NotFound { return out, ValidationUnknown } if err == nil { @@ -513,7 +513,7 @@ State: } xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) switch { - case err == preg.NotFound: + case err == protoregistry.NotFound: b = b[n:] case err != nil: return out, ValidationUnknown diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/weak.go b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/weak.go index 009cbefd1..eb79a7ba9 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -7,7 +7,7 @@ package impl import ( "fmt" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -17,32 +17,32 @@ import ( // defined directly on it. type weakFields WeakFields -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { m, ok := w[int32(num)] return m, ok } -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { if *w == nil { *w = make(weakFields) } (*w)[int32(num)] = m } -func (w *weakFields) clear(num pref.FieldNumber) { +func (w *weakFields) clear(num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { _, ok := w[int32(num)] return ok } -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { if m, ok := w[int32(num)]; ok { return m } @@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr return mt.Zero().Interface() } -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { if m != nil { mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) if mt == nil { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/order/order.go b/event-exporter/vendor/google.golang.org/protobuf/internal/order/order.go index 2a24953f6..dea522e12 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/order/order.go @@ -5,12 +5,12 @@ package order import ( - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // FieldOrder specifies the ordering to visit message fields. // It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool var ( // AnyFieldOrder specifies no specific field ordering. 
@@ -18,9 +18,9 @@ var ( // LegacyFieldOrder sorts fields in the same ordering as emitted by // wire serialization in the github.com/golang/protobuf implementation. - LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { + inOneof := func(od protoreflect.OneofDescriptor) bool { return od != nil && !od.IsSynthetic() } @@ -33,7 +33,7 @@ var ( return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. @@ -41,14 +41,14 @@ var ( } // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { return x.Number() < y.Number() } // IndexNameFieldOrder sorts non-extension fields before extension fields. // Non-extensions are sorted according to their declaration index. // Extensions are sorted according to their full name. - IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { // Non-extension fields sort before extension fields. if x.IsExtension() != y.IsExtension() { return !x.IsExtension() && y.IsExtension() @@ -64,7 +64,7 @@ var ( // KeyOrder specifies the ordering to visit map entries. // It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool +type KeyOrder func(x, y protoreflect.MapKey) bool var ( // AnyKeyOrder specifies no specific key ordering. @@ -72,7 +72,7 @@ var ( // GenericKeyOrder sorts false before true, numeric keys in ascending order, // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { switch x.Interface().(type) { case bool: return !x.Bool() && y.Bool() diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/order/range.go b/event-exporter/vendor/google.golang.org/protobuf/internal/order/range.go index c8090e0c5..1665a68e5 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/order/range.go @@ -9,12 +9,12 @@ import ( "sort" "sync" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type messageField struct { - fd pref.FieldDescriptor - v pref.Value + fd protoreflect.FieldDescriptor + v protoreflect.Value } var messageFieldPool = sync.Pool{ @@ -25,8 +25,8 @@ type ( // FieldRanger is an interface for visiting all fields in a message. // The protoreflect.Message type implements this interface. FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool ) // RangeFields iterates over the fields of fs according to the specified order.
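The order package itself is internal, so these FieldOrder values cannot be imported directly. The number-ordered traversal they implement can, however, be reproduced with the public protoreflect API alone; a rough sketch, not the vendored code:

    package fieldorder

    import (
    	"sort"

    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // rangeByNumber visits populated fields in ascending field-number order,
    // mimicking NumberFieldOrder using only the public protoreflect API.
    func rangeByNumber(m proto.Message, fn func(protoreflect.FieldDescriptor, protoreflect.Value)) {
    	type field struct {
    		fd protoreflect.FieldDescriptor
    		v  protoreflect.Value
    	}
    	var fields []field
    	m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
    		fields = append(fields, field{fd, v})
    		return true // keep ranging
    	})
    	sort.Slice(fields, func(i, j int) bool { return fields[i].fd.Number() < fields[j].fd.Number() })
    	for _, f := range fields {
    		fn(f.fd, f.v)
    	}
    }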
@@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { }() // Collect all fields in the message and sort them. - fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { fields = append(fields, messageField{fd, v}) return true }) @@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { } type mapEntry struct { - k pref.MapKey - v pref.Value + k protoreflect.MapKey + v protoreflect.Value } var mapEntryPool = sync.Pool{ @@ -76,8 +76,8 @@ type ( // EntryRanger is an interface for visiting all fields in a message. // The protoreflect.Map type implements this interface. EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited. - VisitEntry = func(pref.MapKey, pref.Value) bool + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool ) // RangeEntries iterates over the entries of es according to the specified order. @@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { }() // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { entries = append(entries, mapEntry{k, v}) return true }) diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 87% rename from event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 2160c7019..a008acd09 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,14 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs import ( "unsafe" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -58,7 +59,7 @@ type Builder struct { // AppendFullName is equivalent to protoreflect.FullName.Append, // but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { n := len(prefix) + len(".") + len(name) if len(prefix) == 0 { n -= len(".") @@ -67,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful sb.buf = append(sb.buf, prefix...) 
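RangeFields and RangeEntries both lease a scratch slice from a sync.Pool, sort it, invoke the callback, and return the slice for reuse. The same amortization pattern in isolation, shown on a plain []int rather than message fields:

    package scratch

    import (
    	"sort"
    	"sync"
    )

    var scratchPool = sync.Pool{
    	New: func() any { return new([]int) },
    }

    // sortedCopy sorts xs into a pooled scratch buffer so repeated calls avoid
    // reallocating, the way RangeFields reuses messageFieldPool above.
    func sortedCopy(xs []int) []int {
    	p := scratchPool.Get().(*[]int)
    	defer func() {
    		*p = (*p)[:0] // reset length, keep capacity for the next caller
    		scratchPool.Put(p)
    	}()
    	*p = append(*p, xs...)
    	sort.Ints(*p)
    	out := make([]int, len(*p))
    	copy(out, *p)
    	return out
    }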
sb.buf = append(sb.buf, '.') sb.buf = append(sb.buf, name...) - return pref.FullName(sb.last(n)) + return protoreflect.FullName(sb.last(n)) } // MakeString is equivalent to string(b), but optimized for large batches @@ -86,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 000000000..60166f2ba --- /dev/null +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. 
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/event-exporter/vendor/google.golang.org/protobuf/internal/version/version.go b/event-exporter/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2..d8f48faff 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/event-exporter/vendor/google.golang.org/protobuf/internal/version/version.go @@ -12,54 +12,54 @@ import ( // These constants determine the current version of this module. // -// // For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// +// - Tagged releases use a tag that is identical to String. +// - Tagged releases never reference a commit where the String +// contains "devel". +// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. // // Steps for tagging a new release: -// 1. Create a new CL. // -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". +// 1. Create a new CL. // -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". // -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. +// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. // -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. // -// 6. Tag a new version, where the tag is the current String. +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. // -// 7. Write release notes for all notable changes -// between this release and the last release. +// 6. Tag a new version, where the tag is the current String. // -// 8. Create a new CL. +// 7. Write release notes for all notable changes +// between this release and the last release. // -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// 8. Create a new CL. // -// 10. Send out the CL for review and submit it. +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 - Patch = 1 + Minor = 32 + Patch = 0 PreRelease = "" ) // String formats the version string for this module in semver format.
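The new go1.21 strings_unsafe file above replaces hand-rolled header manipulation with unsafe.String and unsafe.Slice. Both conversions alias memory rather than copy it, which is exactly why the vendored comments insist the inputs and outputs be treated as immutable; a self-contained sketch:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	b := []byte("hello")
    	// Zero-copy []byte -> string: s shares b's backing array, so b must
    	// not be mutated for as long as s is reachable.
    	s := unsafe.String(unsafe.SliceData(b), len(b))

    	// Zero-copy string -> []byte: the result must be treated as read-only.
    	rb := unsafe.Slice(unsafe.StringData(s), len(s))

    	fmt.Println(s, rb) // hello [104 101 108 108 111]
    }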
// // Examples: +// // v1.20.1 // v1.21.0-rc.1 func String() string { diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/decode.go b/event-exporter/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88..e5b03b567 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/decode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/decode.go @@ -19,7 +19,8 @@ import ( // UnmarshalOptions configures the unmarshaler. // // Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) type UnmarshalOptions struct { pragma.NoUnkeyedLiterals @@ -42,18 +43,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -61,8 +69,11 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +97,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/doc.go b/event-exporter/vendor/google.golang.org/protobuf/proto/doc.go index c52d8c4ab..80ed16a0c 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/doc.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,89 +5,81 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. 
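The RecursionLimit plumbing is the user-visible part of the decode.go change: plain Unmarshal now applies protowire.DefaultRecursionLimit, and callers may tighten it. A hedged usage sketch (the limit of 16 is an arbitrary example, and m stands for any generated message):

    package wire

    import (
    	"log"

    	"google.golang.org/protobuf/proto"
    )

    // decode is a sketch: m may be any generated message, data any wire bytes.
    func decode(data []byte, m proto.Message) error {
    	o := proto.UnmarshalOptions{
    		DiscardUnknown: true,
    		RecursionLimit: 16, // zero would mean protowire.DefaultRecursionLimit
    	}
    	if err := o.Unmarshal(data, m); err != nil {
    		// Overly nested input now fails with "exceeded max recursion depth".
    		log.Printf("unmarshal: %v", err)
    		return err
    	}
    	return nil
    }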
// // For a guide to generated Go protocol buffer code, see: +// https://protobuf.dev/reference/go/go-generated. // -// https://developers.google.com/protocol-buffers/docs/reference/go-generated -// -// -// Binary serialization +// # Binary serialization // // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // +// # Basic message operations // -// Basic message operations +// - [Clone] makes a deep copy of a message. // -// • Clone makes a deep copy of a message. +// - [Merge] merges the content of a message into another. // -// • Merge merges the content of a message into another. +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Reset] clears the content of a message. // -// • Reset clears the content of a message. +// - [CheckInitialized] reports whether all required fields in a message are set. // -// • CheckInitialized reports whether all required fields in a message are set. -// -// -// Optional scalar constructors +// # Optional scalar constructors // // The API for some generated messages represents optional scalar fields // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. // // Optional scalar fields are only supported in proto2. // +// # Extension accessors // -// Extension accessors -// -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // +// # Related packages // -// Related packages -// -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. 
+// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/encode.go b/event-exporter/vendor/google.golang.org/protobuf/proto/encode.go index d18239c23..4fed202f9 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/encode.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/encode.go @@ -16,7 +16,8 @@ import ( // MarshalOptions configures the marshaler. // // Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) type MarshalOptions struct { pragma.NoUnkeyedLiterals @@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { // otherwise it returns a non-nil empty buffer. // // This is to assist the edge-case where user-code does the following: +// // m1.OptionalBytes, _ = proto.Marshal(m2) +// // where they expect the proto2 "optional_bytes" field to be populated // if and only if m2 is a valid message. func emptyBytesForMessage(m Message) []byte { @@ -126,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/equal.go b/event-exporter/vendor/google.golang.org/protobuf/proto/equal.go index 4dba2b969..1a0be1b03 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/equal.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,163 +5,53 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message.
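The MarshalOptions example above only gained gofmt's indented-example formatting, but it is worth spelling out, since the rewritten Equal documentation leans on it: under deterministic serialization, equal messages marshal to identical bytes. A sketch:

    package wire

    import "google.golang.org/protobuf/proto"

    // marshalStable serializes with deterministic ordering for map fields,
    // the mode the Equal doc refers to: equal messages are guaranteed to
    // produce the same bytes under deterministic serialization.
    func marshalStable(m proto.Message) ([]byte, error) {
    	return proto.MarshalOptions{Deterministic: true}.Marshal(m)
    }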
// -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } mx := x.ProtoReflect() my := y.ProtoReflect() if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. 
-func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. -func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch fd.Kind() { - case pref.BoolKind: - return x.Bool() == y.Bool() - case pref.EnumKind: - return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: - return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: - return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case pref.StringKind: - return x.String() == y.String() - case pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) - y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/extension.go b/event-exporter/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda8..17899a3a7 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/extension.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/merge.go b/event-exporter/vendor/google.golang.org/protobuf/proto/merge.go index d761ab331..3c6fe5780 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/merge.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? 
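The rewritten Equal delegates to protoreflect.Value.Equal and adds a pointer-identity fast path. Its documented nil behavior is easy to get wrong, so a sketch may help; wrapperspb is used only as a stand-in for any generated message type:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
    	var invalid *wrapperspb.StringValue // typed nil pointer: an invalid message
    	valid := &wrapperspb.StringValue{}  // empty but valid message

    	fmt.Println(proto.Equal(invalid, valid))   // false: invalid != valid
    	fmt.Println(proto.Equal(invalid, invalid)) // true: identical pointers short-circuit
    	fmt.Println(proto.Equal(valid, &wrapperspb.StringValue{})) // true: same contents
    }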
diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/proto.go b/event-exporter/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b1..7543ee6b2 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/proto.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/proto_methods.go b/event-exporter/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6..465e057b3 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/event-exporter/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d4320..494d6ceef 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. 
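The reworded proto.Error doc now shows the errors.Is idiom inline. Expanded into a runnable sketch, with a deliberately truncated buffer as input:

    package main

    import (
    	"errors"
    	"fmt"

    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/emptypb"
    )

    func main() {
    	bad := []byte{0xFF} // a truncated, invalid wire-format message
    	err := proto.Unmarshal(bad, &emptypb.Empty{})
    	// proto.Error matches every error the protobuf module produces.
    	fmt.Println(errors.Is(err, proto.Error)) // true
    }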
+//go:build protoreflect // +build protoreflect package proto diff --git a/event-exporter/vendor/google.golang.org/protobuf/proto/size.go b/event-exporter/vendor/google.golang.org/protobuf/proto/size.go index 554b9c6c0..f1692b49b 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/proto/size.go +++ b/event-exporter/vendor/google.golang.org/protobuf/proto/size.go @@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb1205..baa0cc621 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. 
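Hoisting protowire.SizeTag out of the loops in size.go is a pure micro-optimization; the public contract that Size reports exactly what Marshal produces is untouched. One way callers exploit that contract (a sketch, not an API from this patch):

    package wire

    import "google.golang.org/protobuf/proto"

    // appendMessage reserves exactly Size(m) bytes before appending, so
    // MarshalAppend performs no further allocation; Size and Marshal are
    // guaranteed to agree on the encoded length.
    func appendMessage(dst []byte, m proto.Message) ([]byte, error) {
    	if need := proto.Size(m); cap(dst)-len(dst) < need {
    		grown := make([]byte, len(dst), len(dst)+need)
    		copy(grown, dst)
    		dst = grown
    	}
    	return proto.MarshalOptions{}.MarshalAppend(dst, m)
    }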
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1af..aff6fd490 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. 
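NewFile and the To*DescriptorProto helpers later in this patch form a round trip between descriptors and their message form. A sketch of that round trip (durationpb is just a conveniently linked-in file; any registered file works):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protodesc"
    	"google.golang.org/protobuf/reflect/protoregistry"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	fd := durationpb.File_google_protobuf_duration_proto
    	// Descriptor -> FileDescriptorProto message...
    	fdp := protodesc.ToFileDescriptorProto(fd)
    	// ...and back, resolving imports against the global file registry.
    	fd2, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(fd2.Path() == fd.Path()) // true
    }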
+ if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + f.L1.HasPacked = true + f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. + // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. + f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + + if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index cebb36cda..27d7e3501 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", // then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { if !ref.IsValid() { return nil, errors.New("invalid name reference: %q", ref) diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 000000000..7352926ca --- /dev/null +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,177 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + _ "embed" + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 +) + +//go:embed editions_defaults.binpb +var binaryEditionDefaults []byte +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(binaryEditionDefaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen: protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fs := defaults.GetDefaults()[0].GetFeatures() + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search.
+ for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.FieldPresence == nil { + return fileDesc.L1.EditionFeatures.IsFieldPresence + } + return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT +} + +func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.RepeatedFieldEncoding == nil { + return fileDesc.L1.EditionFeatures.IsPacked + } + return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED +} + +func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.Utf8Validation == nil { + return fileDesc.L1.EditionFeatures.IsUTF8Validated + } + return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY +} + +func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.MessageEncoding == nil { + return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + } + return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
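As the comment above notes, getFeatureSetFor does a linear scan over defaults sorted by edition, keeping the last entry that does not exceed the requested edition. The selection logic in isolation, with plain ints and strings standing in for Edition and *descriptorpb.FeatureSet:

    package editions

    // featureDefault stands in for one FeatureSetDefaults entry: the edition
    // it takes effect at, plus an opaque payload (really a feature set).
    type featureDefault struct {
    	edition  int
    	features string
    }

    // latestNotAfter returns the payload of the greatest entry whose edition
    // does not exceed target, assuming entries are sorted ascending by
    // edition; this is the same scan getFeatureSetFor performs before caching.
    func latestNotAfter(defaults []featureDefault, target int) string {
    	out := defaults[0].features // caller ensures target >= defaults[0].edition
    	for _, d := range defaults[1:] {
    		if d.edition > target {
    			break
    		}
    		out = d.features
    	}
    	return out
    }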
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + if fs == nil { + fs = &descriptorpb.FeatureSet{} + } + + var fieldPresence descriptorpb.FeatureSet_FieldPresence + if fp := fs.FieldPresence; fp != nil { + fieldPresence = *fp + } else { + fieldPresence = *dfs.FieldPresence + } + fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fieldPresence == descriptorpb.FeatureSet_EXPLICIT + + var enumType descriptorpb.FeatureSet_EnumType + if et := fs.EnumType; et != nil { + enumType = *et + } else { + enumType = *dfs.EnumType + } + fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN + + var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding + if rfe := fs.RepeatedFieldEncoding; rfe != nil { + respeatedFieldEncoding = *rfe + } else { + respeatedFieldEncoding = *dfs.RepeatedFieldEncoding + } + fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED + + var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation + if utf8val := fs.Utf8Validation; utf8val != nil { + isUTF8Validated = *utf8val + } else { + isUTF8Validated = *dfs.Utf8Validation + } + fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY + + var messageEncoding descriptorpb.FeatureSet_MessageEncoding + if me := fs.MessageEncoding; me != nil { + messageEncoding = *me + } else { + messageEncoding = *dfs.MessageEncoding + } + fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED + + var jsonFormat descriptorpb.FeatureSet_JsonFormat + if jf := fs.JsonFormat; jf != nil { + jsonFormat = *jf + } else { + jsonFormat = *dfs.JsonFormat + } + fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW +} diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb new file mode 100644 index 000000000..1a8610a84 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0æ +  (0ç +  (0è æ(è \ No newline at end of file diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceffc..9d6e05420 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. 
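initFileDescFromFeatureSet repeats the same explicit-else-default nil check six times. Purely as a hypothetical refactoring sketch (not code from this patch), a generic helper could collapse the copies; IsPacked, for instance, would become orDefault(fs.RepeatedFieldEncoding, dfs.RepeatedFieldEncoding) == descriptorpb.FeatureSet_PACKED:

    package editions

    // orDefault dereferences explicit when the feature was set on the file's
    // options and falls back to the resolved edition default otherwise; both
    // pointers mirror the optional fields of the FeatureSet message.
    func orDefault[T any](explicit, fileDefault *T) T {
    	if explicit != nil {
    		return *explicit
    	}
    	return *fileDefault
    }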
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. 
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9..d5d5af6eb 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index dd85915bd..ec6572dfd 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -8,60 +8,55 @@ // defined in proto source files and value interfaces which provide the // ability to examine and manipulate the contents of messages. // +// # Protocol Buffer Descriptors // -// Protocol Buffer Descriptors -// -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // +// # Go Type Descriptors // -// Go Type Descriptors -// -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. 
// +// # Value Interfaces // -// Value Interfaces -// -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. // -// -// Relationships +// # Relationships // // The following diagrams demonstrate the relationships between // various types declared in this package. // -// // ┌───────────────────────────────────┐ // V │ // ┌────────────── New(n) ─────────────┐ │ @@ -76,13 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. -// -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -96,14 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// -// • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. +// +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. +// +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface].
// // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -115,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -138,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -147,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -432,7 +439,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -476,7 +483,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 121ba3a07..0b9942885 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { // in a future version of this module.
// // Example output: +// // .message_type[6].nested_type[15].field[3] func (p SourcePath) String() string { b := p.appendFileDescriptorProto(nil) diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223c..0c045db6a 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 14: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -178,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -236,6 +240,10 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +287,10 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -324,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -345,16 +359,49 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, 
"repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -404,6 +451,12 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -413,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -426,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -441,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -459,3 +533,22 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 8e53c44a9..60ff62b4c 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. 
// If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a [google.golang.org/protobuf/proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,16 +470,17 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for an extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // // For example: +// // syntax = "proto2"; // package example; // message FooMessage { @@ -531,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -547,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -609,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor].
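As a brief sketch of the EnumType contract documented above (not part of the vendored code; the descriptorpb enum is used only because it is a readily available generated enum):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        // Generated enums expose their EnumType via the Type accessor.
        et := descriptorpb.FieldDescriptorProto_TYPE_BOOL.Type()
        // New returns an instance of this enum type with its value set to n.
        e := et.New(8) // 8 is the number of TYPE_BOOL
        vd := e.Descriptor().Values().ByNumber(e.Number())
        fmt.Println(vd.Name()) // TYPE_BOOL
    }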
type ServiceDescriptor interface { Descriptor diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f31981077..a7b0d06ff 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -148,11 +148,11 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. 
Len() int diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 000000000..654599d44 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. +// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since [Value] does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - [Message] values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - [List] values are equal if they are the same length and +// each corresponding element is equal. +// +// - [Map] values are equal if they have the same set of keys and +// the corresponding value for each key is equal. +func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. 
+func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1..7ced876f4 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724..160309731 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,16 +31,42 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch.
-// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// [List], [Map], and [Message] Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value update that location. +// A composite value acquired with a Mutable method, such as [Message.Mutable], +// always references the source object. +// +// For example: +// +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).List() +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as [Message.Get], may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} @@ -280,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -301,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as an [EnumNumber] and panics if the type is not an [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -311,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -321,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -331,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -341,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -352,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]).
+// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -366,12 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: +// // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -399,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. func (k MapKey) Value() Value { return Value(k) } diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index c45debdca..b1fdbe3e8 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 000000000..435470111 --- /dev/null +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 59f024c44..6267dc52a 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/event-exporter/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. 
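To illustrate the Types registry described above, a minimal sketch (not part of the vendored code; google.protobuf.Duration is just an example of a message that is registered as a side effect of importing its generated package):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        _ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration in GlobalTypes
    )

    func main() {
        // Look up a registered message type by its full name.
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        if err != nil {
            panic(err) // protoregistry.NotFound if the type is not registered
        }
        msg := mt.New().Interface() // a fresh *durationpb.Duration
        fmt.Printf("%T\n", msg)
    }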
package protoregistry @@ -30,9 +30,11 @@ import ( // conflictPolicy configures the policy for handling registration conflicts. // // It can be over-written at compile time with a linker-initialized variable: +// // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" // // It can be over-written at program execution with an environment variable: +// // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main // // Neither of the above are covered by the compatibility promise and @@ -44,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v @@ -216,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -308,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -429,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -449,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up an extension field by the field's full name. // Note that this is the full name of the field as determined by @@ -588,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -609,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found.
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -630,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -660,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -701,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.E // FindExtensionByNumber looks up an extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/event-exporter/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/event-exporter/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67e..44cf467d8 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/event-exporter/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/event-exporter/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/event-exporter/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go index ff094e1ba..a105cb23e 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ b/event-exporter/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -26,16 +26,19 @@ const ( // EnforceVersion is used by code generated by protoc-gen-go // to statically enforce minimum and maximum versions of this package. // A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. // // The runtime package can be upgraded by running: +// // go get google.golang.org/protobuf // // The generated code can be regenerated by running: +// // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} // // Example usage by generated code: +// // const ( // // Verify that this generated code is sufficiently up-to-date.
// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) @@ -49,6 +52,7 @@ const ( type EnforceVersion uint // This enforces the following invariant: +// // MinVersion ≤ GenVersion ≤ MaxVersion const ( _ = EnforceVersion(GenVersion - MinVersion) diff --git a/event-exporter/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/event-exporter/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab511..38daa858d 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/event-exporter/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,152 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + // Placeholder editions for testing feature resolution. These should not be + // used or relied on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 +) + +// Enum value maps for Edition. +var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +// The verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared.
+ ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. +func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -67,9 +213,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after proto2. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. @@ -137,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -168,21 +315,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in proto2. In proto3 and Editions + // it's explicitly prohibited.
In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -197,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +438,13 @@ type FieldOptions_CType int32 const ( // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. 
FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +474,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -369,103 +525,670 @@ var ( } ) -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. +var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[7] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. +var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? 
HTTP-based RPC implementations may choose the GET verb for safe +// methods, and the PUT verb for idempotent methods, instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. +var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[9] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use.
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_NONE FeatureSet_Utf8Validation = 1 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 1: "NONE", + 2: "VERIFY", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "NONE": 1, + "VERIFY": 2, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) *p = x return p } -func (x FieldOptions_JSType) String() string { +func (x FeatureSet_JsonFormat) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } -func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] } -func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Do not use. -func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = FieldOptions_JSType(num) + *x = FeatureSet_JsonFormat(num) return nil } -// Deprecated: Use FieldOptions_JSType.Descriptor instead. -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. 
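+	// For example, the span of a generated setter method (a hypothetical
+	// SetFoo) would carry this semantic.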
+ GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 ) -// Enum value maps for MethodOptions_IdempotencyLevel. +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. var ( - MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", } - MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, } ) -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) *p = x return p } -func (x MethodOptions_IdempotencyLevel) String() string { +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } -func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] } -func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Do not use. -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MethodOptions_IdempotencyLevel(num) + *x = GeneratedCodeInfo_Annotation_Semantic(num) return nil } -// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -544,8 +1267,12 @@ type FileDescriptorProto struct { // development tools. SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file. 
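+	// As a hedged illustration, a .proto file governed by an edition opens
+	// with a declaration such as:
+	//
+	//	edition = "2023";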
+ Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +1391,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -794,7 +1528,22 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` + // The verification state of the range. + // TODO: flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} @@ -835,6 +1584,27 @@ func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. type FieldDescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1630,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +2151,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1460,6 +2229,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1531,7 +2302,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1651,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1670,10 +2448,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. 
// // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +2472,46 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. // // For maps fields: - // map<KeyType, ValueType> map_field = 1; + // + // map<KeyType, ValueType> map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscores from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2583,21 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1800,14 +2613,18 @@ type FieldOptions struct { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicitly setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64-bit integral and fixed types @@ -1838,7 +2655,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2665,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2680,27 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2763,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2784,41 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { + if x != nil { + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1957,6 +2832,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1993,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2898,17 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscores from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2069,6 +2964,21 @@ func (x *EnumOptions) GetDeprecated() bool { return Default_EnumOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2087,13 +2997,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields.
const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2135,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2148,6 +3079,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2194,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2220,6 +3160,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2276,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2386,6 +3335,171 @@ func (x *UninterpretedOption) GetAggregateValue() string { return "" } +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
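+// As a sketch only (the variable name is invented): code holding a resolved
+// FeatureSet can branch on individual features through the generated
+// getters, e.g.
+//
+//	if fs.GetFieldPresence() == FeatureSet_IMPLICIT {
+//		// Treat the field like a proto3 implicit-presence field.
+//	}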
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { + if x != nil { + return x.Defaults + } + return nil +} + +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN +} + // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { @@ -2399,50 +3513,55 @@ type SourceCodeInfo struct { // tools. // // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } + // + // message Foo { + // optional string foo = 1; + // } + // // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). 
// // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
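+	// As an illustrative reading of a path (4 is the message_type field of
+	// FileDescriptorProto, 2 is the field field of DescriptorProto):
+	// [ 4, 0, 2, 1 ] identifies the second field of the first message in
+	// the file.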
Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2455,7 +3574,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2468,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2494,7 +3613,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2507,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2520,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2543,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2556,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2608,7 +3727,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2621,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2651,6 +3770,96 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. 
+ Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. +func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. // @@ -2669,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2682,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2712,11 +3921,66 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. +func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2729,7 +3993,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2742,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2772,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. 
+type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2781,23 +4104,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +4160,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. 
+ // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2862,7 +4196,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2875,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2888,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -2940,15 +4274,16 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2961,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2974,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3005,6 +4340,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,66 +4396,98 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 
0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 
0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 
0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, + 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, + 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 
0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, @@ -3164,9 +4538,9 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, + 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, @@ -3230,7 +4604,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, @@ -3293,197 +4667,416 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 
0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 
0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 
0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, + 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, + 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, + 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, + 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, + 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 
0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, + 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, + 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, + 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, + 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, + 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, + 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, + 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, + 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, + 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, + 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, + 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, + 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, + 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, + 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x10, 0x08, 0x12, 0x16, 
0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, + 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, + 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, + 0x6c, 
0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, + 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, + 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, + 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, + 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, + 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, + 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, + 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, + 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 
0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, + 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, + 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, + 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, + 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, + 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, + 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 
0x12, 0x41, 0x0a, + 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 
0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 
0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, + 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, + 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 
0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, + 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -3498,92 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: 
google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: 
google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: 
google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: 
google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name 
-> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3839,19 +5476,21 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -3863,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ 
-3875,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -3887,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -3899,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -3911,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -3923,6 +5562,66 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -3940,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, - NumMessages: 27, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/event-exporter/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/event-exporter/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b9..9de51be54 100644 --- 
a/event-exporter/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/event-exporter/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,35 +182,33 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // -// // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. 
Example:
//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
type Any struct {
 state protoimpl.MessageState
 sizeCache protoimpl.SizeCache
@@ -228,22 +226,22 @@ type Any struct {
 // scheme `http`, `https`, or no scheme, one can optionally set up a type
 // server that maps type URLs to message definitions as follows:
 //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
+ // - If no scheme is provided, `https` is assumed.
+ // - An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // - Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
 //
 // Note: this functionality is not currently available in the official
 // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
+ // type.googleapis.com. As of May 2023, there are no widely used type server
+ // implementations and no plans to implement one.
 //
 // Schemes other than `http`, `https` (or the empty scheme) might be
 // used with implementation specific semantics.
- //
 TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 // Must be a valid serialized protocol buffer of the above specified type.
 Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
diff --git a/event-exporter/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/event-exporter/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index a583ca2f6..df709a8dd 100644
--- a/event-exporter/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/event-exporter/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -35,8 +35,7 @@
//
// The Duration message represents a signed span of time.
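The anypb doc comment updated above describes the New / MessageIs / UnmarshalTo round trip but leaves the payload type (pb.Foo) elided. As orientation for reviewers of this bump, here is a minimal, self-contained sketch of that round trip; it is not part of the vendored diff, and it uses durationpb.Duration (also updated in this patch) as a hypothetical stand-in payload:

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack: wrap a concrete message in an Any. durationpb.Duration stands
	// in for the doc comment's elided pb.Foo.
	payload := durationpb.New(3 * time.Second)
	wrapped, err := anypb.New(payload)
	if err != nil {
		log.Fatal(err)
	}

	// Type-check with MessageIs, then unpack into a fresh message with
	// UnmarshalTo, as the doc comment above describes.
	if wrapped.MessageIs(&durationpb.Duration{}) {
		out := &durationpb.Duration{}
		if err := wrapped.UnmarshalTo(out); err != nil {
			log.Fatal(err)
		}
		fmt.Println(out.AsDuration()) // 3s
	}
}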
// -// -// Conversion to a Go Duration +// # Conversion to a Go Duration // // The AsDuration method can be used to convert a Duration message to a // standard Go time.Duration value: @@ -65,15 +64,13 @@ // the resulting value to the closest representable value (e.g., math.MaxInt64 // for positive overflow and math.MinInt64 for negative overflow). // -// -// Conversion from a Go Duration +// # Conversion from a Go Duration // // The durationpb.New function can be used to construct a Duration message // from a standard Go time.Duration value: // // dur := durationpb.New(d) // ... // make use of d as a *durationpb.Duration -// package durationpb import ( @@ -96,43 +93,43 @@ import ( // // Example 1: Compute Duration from two Timestamps in pseudo code. // -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; // -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; // -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; // -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; // -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } // // Example 3: Compute Duration from datetime.timedelta in Python. // -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) // // # JSON Mapping // @@ -143,8 +140,6 @@ import ( // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". 
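A compact sketch of the two conversions the durationpb doc comment above describes: durationpb.New for Go to proto, and AsDuration for proto to Go. The CheckValid call is a related helper in the same package, shown here as the defensive step before converting; AsDuration itself never fails and saturates on overflow, as the comment notes. This example is illustrative and not part of the vendored diff:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Go -> protobuf: construct a Duration message from a time.Duration.
	d := durationpb.New(90 * time.Minute)

	// protobuf -> Go: CheckValid reports out-of-range or malformed values;
	// AsDuration saturates at math.MaxInt64/math.MinInt64 on overflow
	// rather than returning an error.
	if err := d.CheckValid(); err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	fmt.Println(d.AsDuration()) // 1h30m0s
}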
-// -// type Duration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/event-exporter/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/event-exporter/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c9ae92132..81511a336 100644 --- a/event-exporter/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/event-exporter/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -36,8 +36,7 @@ // The Timestamp message represents a timestamp, // an instant in time since the Unix epoch (January 1st, 1970). // -// -// Conversion to a Go Time +// # Conversion to a Go Time // // The AsTime method can be used to convert a Timestamp message to a // standard Go time.Time value in UTC: @@ -59,8 +58,7 @@ // ... // handle error // } // -// -// Conversion from a Go Time +// # Conversion from a Go Time // // The timestamppb.New function can be used to construct a Timestamp message // from a standard Go time.Time value: @@ -72,7 +70,6 @@ // // ts := timestamppb.Now() // ... // make use of ts as a *timestamppb.Timestamp -// package timestamppb import ( @@ -101,52 +98,50 @@ import ( // // Example 1: Compute Timestamp from POSIX `time()`. // -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // -// struct timeval tv; -// gettimeofday(&tv, NULL); +// struct timeval tv; +// gettimeofday(&tv, NULL); // -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// long millis = System.currentTimeMillis(); // +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); // // Example 5: Compute Timestamp from Java `Instant.now()`. 
// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); +// Instant now = Instant.now(); // +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); // // Example 6: Compute Timestamp from current time in Python. // -// timestamp = Timestamp() -// timestamp.GetCurrentTime() +// timestamp = Timestamp() +// timestamp.GetCurrentTime() // // # JSON Mapping // @@ -172,10 +167,8 @@ import ( // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. -// -// type Timestamp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/event-exporter/vendor/modules.txt b/event-exporter/vendor/modules.txt index 353579e57..132ec958e 100644 --- a/event-exporter/vendor/modules.txt +++ b/event-exporter/vendor/modules.txt @@ -1,37 +1,50 @@ -# cloud.google.com/go/compute v1.5.0 -## explicit; go 1.15 +# cloud.google.com/go/compute v1.23.4 +## explicit; go 1.19 +cloud.google.com/go/compute/internal +# cloud.google.com/go/compute/metadata v0.2.3 +## explicit; go 1.19 cloud.google.com/go/compute/metadata # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/go-logr/logr v1.2.2 -## explicit; go 1.16 +# github.com/felixge/httpsnoop v1.0.4 +## explicit; go 1.13 +github.com/felixge/httpsnoop +# github.com/go-logr/logr v1.4.1 +## explicit; go 1.18 github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/glog v1.0.0 -## explicit; go 1.11 +# github.com/golang/glog v1.1.2 +## explicit; go 1.19 github.com/golang/glog +github.com/golang/glog/internal/logsink +github.com/golang/glog/internal/stackdump # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.2 +# github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.5.7 -## explicit; go 1.11 +# github.com/google/go-cmp v0.6.0 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -41,11 +54,43 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/googleapis/gax-go/v2 v2.1.1 -## explicit; go 1.11 +# github.com/google/s2a-go v0.1.7 +## explicit; go 1.19 +github.com/google/s2a-go 
+github.com/google/s2a-go/fallback +github.com/google/s2a-go/internal/authinfo +github.com/google/s2a-go/internal/handshaker +github.com/google/s2a-go/internal/handshaker/service +github.com/google/s2a-go/internal/proto/common_go_proto +github.com/google/s2a-go/internal/proto/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/s2a_go_proto +github.com/google/s2a-go/internal/proto/v2/common_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto +github.com/google/s2a-go/internal/record +github.com/google/s2a-go/internal/record/internal/aeadcrypter +github.com/google/s2a-go/internal/record/internal/halfconn +github.com/google/s2a-go/internal/tokenmanager +github.com/google/s2a-go/internal/v2 +github.com/google/s2a-go/internal/v2/certverifier +github.com/google/s2a-go/internal/v2/remotesigner +github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/retry +github.com/google/s2a-go/stream +# github.com/google/uuid v1.6.0 +## explicit +github.com/google/uuid +# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +## explicit; go 1.19 +github.com/googleapis/enterprise-certificate-proxy/client +github.com/googleapis/enterprise-certificate-proxy/client/util +# github.com/googleapis/gax-go/v2 v2.12.1 +## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx +github.com/googleapis/gax-go/v2/internal # github.com/googleapis/gnostic v0.5.5 ## explicit; go 1.12 github.com/googleapis/gnostic/compiler @@ -82,7 +127,7 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -100,58 +145,92 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# golang.org/x/net v0.17.0 -## explicit; go 1.17 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 +## explicit; go 1.20 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil +# go.opentelemetry.io/otel v1.23.0 +## explicit; go 1.20 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.20.0 +# go.opentelemetry.io/otel/metric v1.23.0 +## explicit; go 1.20 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +# go.opentelemetry.io/otel/trace v1.23.0 +## explicit; go 1.20 +go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded +# golang.org/x/crypto v0.19.0 +## explicit; go 1.18 +golang.org/x/crypto/chacha20 +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/alias +golang.org/x/crypto/internal/poly1305 +# golang.org/x/net v0.21.0 +## explicit; go 1.18 golang.org/x/net/context -golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 
golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b -## explicit; go 1.11 +# golang.org/x/oauth2 v0.17.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +# golang.org/x/sys v0.17.0 +## explicit; go 1.18 +golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.13.0 -## explicit; go 1.17 +# golang.org/x/term v0.17.0 +## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.13.0 -## explicit; go 1.17 +# golang.org/x/text v0.14.0 +## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 -## explicit +# golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/api v0.70.0 -## explicit; go 1.15 +# google.golang.org/api v0.167.0 +## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/gensupport google.golang.org/api/internal/impersonate google.golang.org/api/internal/third_party/uritemplates google.golang.org/api/logging/v2 google.golang.org/api/option google.golang.org/api/option/internaloption -google.golang.org/api/transport/cert google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport/internal/dca -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -163,13 +242,13 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 -## explicit; go 1.15 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 +## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.44.0 -## explicit; go 1.14 +# google.golang.org/grpc v1.61.1 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -178,6 +257,7 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -187,6 +267,7 @@ google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -197,9 +278,12 @@ 
google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -211,12 +295,13 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 +# google.golang.org/protobuf v1.32.0 +## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire
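Closing out the well-known-type updates in this patch, the timestamppb equivalents of the conversions documented in the timestamp.pb.go hunk above: New and Now for Go to proto, AsTime for proto to Go (always UTC). As with the duration sketch, CheckValid is a helper from the same package assumed here as the validity guard, and the example is illustrative rather than part of the vendored diff:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Go -> protobuf, from an explicit time.Time and from the wall clock.
	ts := timestamppb.New(time.Date(2024, 2, 26, 23, 40, 38, 0, time.UTC))
	now := timestamppb.Now()

	// protobuf -> Go: AsTime always yields a UTC time.Time; CheckValid
	// guards against values outside the representable range.
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339)) // 2024-02-26T23:40:38Z
	fmt.Println(now.AsTime().After(ts.AsTime()))  // true when run after Feb 2024
}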