From 8282a2f538483919f0e7010c6958e4f7beb3db8e Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 15 Jan 2024 16:33:51 -0800 Subject: [PATCH 1/3] Upgrade Prometheus version to include loser tree postings improvement (#5711) * upgrade Thanos version to include loser tree postings improvement Signed-off-by: Ben Ye * update golangci-lint version Signed-off-by: Ben Ye * disable revive Signed-off-by: Ben Ye * fix golint issue Signed-off-by: Ben Ye * fix lint again Signed-off-by: Ben Ye * update promqlsmith dependency Signed-off-by: Ben Ye * fix build Signed-off-by: Ben Ye * fix lint Signed-off-by: Ben Ye * upgrade thanos version to latest main Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- .github/workflows/test-build-deploy.yml | 12 +- .golangci.yml | 14 +- build-image/Dockerfile | 2 +- go.mod | 90 +- go.sum | 219 ++- integration/backward_compatibility_test.go | 8 - pkg/cortexpb/compat.go | 21 +- pkg/cortexpb/compat_test.go | 10 +- pkg/distributor/distributor.go | 4 +- pkg/querier/testutils.go | 5 +- pkg/ruler/compat.go | 5 + pkg/ruler/manager.go | 2 +- pkg/ruler/notifier.go | 5 +- .../bucketindex/block_ids_fetcher_test.go | 6 +- pkg/storage/tsdb/inmemory_index_cache_test.go | 11 +- pkg/storage/tsdb/users_scanner.go | 3 +- pkg/tracing/migration/bridge_wrapper_test.go | 3 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 82 +- .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 42 +- .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 7 + .../sdk/azcore/internal/exported/exported.go | 34 + .../sdk/azcore/internal/exported/pipeline.go | 20 - .../sdk/azcore/internal/exported/request.go | 31 + .../internal/exported/response_error.go | 51 +- .../sdk/azcore/internal/pollers/fake/fake.go | 133 ++ .../sdk/azcore/internal/shared/constants.go | 12 +- .../sdk/azcore/internal/shared/shared.go | 69 +- .../sdk/azcore/policy/policy.go | 6 +- .../sdk/azcore/runtime/pager.go | 18 +- .../sdk/azcore/runtime/pipeline.go | 36 +- .../sdk/azcore/runtime/policy_bearer_token.go | 39 +- .../sdk/azcore/runtime/policy_http_trace.go | 143 ++ .../azcore/runtime/policy_key_credential.go | 16 +- .../sdk/azcore/runtime/policy_logging.go | 3 +- .../sdk/azcore/runtime/policy_retry.go | 10 +- .../azcore/runtime/policy_sas_credential.go | 10 +- .../sdk/azcore/runtime/policy_telemetry.go | 4 + .../sdk/azcore/runtime/poller.go | 103 +- .../sdk/azcore/runtime/request.go | 97 +- .../sdk/azcore/runtime/response.go | 30 +- .../runtime/transport_default_http_client.go | 10 + .../sdk/azcore/tracing/tracing.go | 61 +- .../sdk/internal/errorinfo/errorinfo.go | 30 + .../alecthomas/units/renovate.json5 | 11 + .../aws/credentials/endpointcreds/provider.go | 47 +- .../aws/aws-sdk-go/aws/defaults/defaults.go | 64 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1382 ++++++++++++- .../aws/aws-sdk-go/aws/session/env_config.go | 28 + .../aws/aws-sdk-go/aws/session/session.go | 8 + .../aws-sdk-go/aws/session/shared_config.go | 10 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 1 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/dynamodb/api.go | 35 +- .../aws/aws-sdk-go/service/dynamodb/errors.go | 4 + .../aws/aws-sdk-go/service/sns/api.go | 50 +- .../aws/aws-sdk-go/service/sns/errors.go | 19 +- .../aws/aws-sdk-go/service/ssooidc/api.go | 666 ++++++- .../aws/aws-sdk-go/service/ssooidc/doc.go | 39 +- .../aws/aws-sdk-go/service/ssooidc/errors.go | 8 + .../aws/aws-sdk-go/service/ssooidc/service.go | 2 +- .../aws/aws-sdk-go/service/sts/api.go | 20 +- .../github.com/bboreham/go-loser/.gitignore | 21 + 
vendor/github.com/bboreham/go-loser/LICENSE | 201 ++ vendor/github.com/bboreham/go-loser/README.md | 8 + vendor/github.com/bboreham/go-loser/tree.go | 153 ++ .../cortexproject/promqlsmith/.go-version | 2 +- .../cortexproject/promqlsmith/opts.go | 5 +- .../cortexproject/promqlsmith/walk.go | 14 +- .../github.com/felixge/httpsnoop/.travis.yml | 6 - vendor/github.com/felixge/httpsnoop/Makefile | 2 +- vendor/github.com/felixge/httpsnoop/README.md | 4 +- .../felixge/httpsnoop/capture_metrics.go | 2 +- .../httpsnoop/wrap_generated_gteq_1.8.go | 2 +- .../httpsnoop/wrap_generated_lt_1.8.go | 2 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 13 + .../github.com/fsnotify/fsnotify/.gitignore | 1 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 83 +- vendor/github.com/fsnotify/fsnotify/README.md | 81 +- .../fsnotify/fsnotify/backend_fen.go | 552 +++++- .../fsnotify/fsnotify/backend_inotify.go | 377 ++-- .../fsnotify/fsnotify/backend_kqueue.go | 295 +-- .../fsnotify/fsnotify/backend_other.go | 205 +- .../fsnotify/fsnotify/backend_windows.go | 247 ++- .../github.com/fsnotify/fsnotify/fsnotify.go | 91 +- vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 125 +- vendor/github.com/go-logr/logr/README.md | 113 +- vendor/github.com/go-logr/logr/SECURITY.md | 18 + vendor/github.com/go-logr/logr/funcr/funcr.go | 48 +- vendor/github.com/go-logr/logr/logr.go | 35 +- .../go-logr/logr/slogr/sloghandler.go | 168 ++ vendor/github.com/go-logr/logr/slogr/slogr.go | 108 + .../github.com/go-logr/logr/slogr/slogsink.go | 122 ++ .../go-openapi/runtime/client/auth_info.go | 77 + .../go-openapi/runtime/client/keepalive.go | 55 + .../runtime/client/opentelemetry.go | 207 ++ .../go-openapi/runtime/client/opentracing.go | 99 + .../go-openapi/runtime/client/request.go | 481 +++++ .../go-openapi/runtime/client/response.go | 50 + .../go-openapi/runtime/client/runtime.go | 537 +++++ .../go-openapi/runtime/yamlpc/yaml.go | 40 + .../go-openapi/strfmt/.golangci.yml | 92 +- vendor/github.com/go-openapi/strfmt/README.md | 5 +- vendor/github.com/go-openapi/strfmt/bson.go | 6 +- vendor/github.com/go-openapi/strfmt/format.go | 2 +- vendor/github.com/go-openapi/strfmt/time.go | 8 +- .../hashicorp/consul/api/.copywrite.hcl | 8 + .../github.com/hashicorp/consul/api/LICENSE | 303 +-- vendor/github.com/hashicorp/consul/api/acl.go | 113 ++ .../github.com/hashicorp/consul/api/agent.go | 18 + .../hashicorp/consul/api/config_entry.go | 53 +- .../consul/api/config_entry_discoverychain.go | 22 +- .../consul/api/config_entry_gateways.go | 40 + .../consul/api/config_entry_rate_limit_ip.go | 8 +- .../consul/api/config_entry_routes.go | 40 +- .../consul/api/config_entry_status.go | 19 + .../hashicorp/consul/api/internal.go | 3 + .../hashicorp/consul/api/operator_raft.go | 7 +- .../hashicorp/go-version/CHANGELOG.md | 45 + .../github.com/hashicorp/go-version/LICENSE | 354 ++++ .../github.com/hashicorp/go-version/README.md | 66 + .../hashicorp/go-version/constraint.go | 296 +++ .../hashicorp/go-version/version.go | 407 ++++ .../go-version/version_collection.go | 17 + .../klauspost/compress/gzhttp/transport.go | 25 +- .../compress/gzhttp/writer/gzkp/gzkp.go | 6 + .../klauspost/compress/gzip/gunzip.go | 5 + .../klauspost/compress/huff0/bytereader.go | 44 - .../klauspost/compress/huff0/compress.go | 5 +- .../klauspost/compress/huff0/huff0.go | 4 +- .../klauspost/compress/zstd/README.md | 2 +- vendor/github.com/metalmatze/signal/LICENSE | 201 ++ .../server/signalhttp/instrumentation.go | 96 + vendor/github.com/miekg/dns/acceptfunc.go | 2 - 
vendor/github.com/miekg/dns/defaults.go | 41 +- vendor/github.com/miekg/dns/dnssec_keyscan.go | 3 +- vendor/github.com/miekg/dns/edns.go | 3 +- vendor/github.com/miekg/dns/generate.go | 33 +- .../miekg/dns/listen_no_reuseport.go | 10 +- .../github.com/miekg/dns/listen_reuseport.go | 30 +- vendor/github.com/miekg/dns/msg.go | 20 +- vendor/github.com/miekg/dns/msg_helpers.go | 50 +- vendor/github.com/miekg/dns/scan.go | 45 +- vendor/github.com/miekg/dns/scan_rr.go | 9 +- vendor/github.com/miekg/dns/server.go | 10 +- vendor/github.com/miekg/dns/svcb.go | 39 +- vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/xfr.go | 18 +- .../prom-label-proxy/LICENSE | 201 ++ .../prom-label-proxy/injectproxy/alerts.go | 26 + .../prom-label-proxy/injectproxy/enforce.go | 168 ++ .../prom-label-proxy/injectproxy/routes.go | 640 ++++++ .../prom-label-proxy/injectproxy/rules.go | 243 +++ .../prom-label-proxy/injectproxy/silences.go | 210 ++ .../prom-label-proxy/injectproxy/utils.go | 32 + .../api/v2/client/alert/alert_client.go | 133 ++ .../v2/client/alert/get_alerts_parameters.go | 387 ++++ .../v2/client/alert/get_alerts_responses.go | 244 +++ .../v2/client/alert/post_alerts_parameters.go | 167 ++ .../v2/client/alert/post_alerts_responses.go | 232 +++ .../v2/client/alertgroup/alertgroup_client.go | 93 + .../alertgroup/get_alert_groups_parameters.go | 348 ++++ .../alertgroup/get_alert_groups_responses.go | 244 +++ .../api/v2/client/alertmanager_api_client.go | 146 ++ .../api/v2/client/general/general_client.go | 93 + .../client/general/get_status_parameters.go | 142 ++ .../v2/client/general/get_status_responses.go | 112 ++ .../receiver/get_receivers_parameters.go | 142 ++ .../receiver/get_receivers_responses.go | 110 ++ .../api/v2/client/receiver/receiver_client.go | 93 + .../silence/delete_silence_parameters.go | 167 ++ .../silence/delete_silence_responses.go | 222 +++ .../client/silence/get_silence_parameters.go | 167 ++ .../client/silence/get_silence_responses.go | 236 +++ .../client/silence/get_silences_parameters.go | 189 ++ .../client/silence/get_silences_responses.go | 177 ++ .../silence/post_silences_parameters.go | 167 ++ .../client/silence/post_silences_responses.go | 284 +++ .../api/v2/client/silence/silence_client.go | 213 ++ .../client_golang/prometheus/histogram.go | 56 +- .../client_golang/prometheus/labels.go | 2 + .../prometheus/process_collector_other.go | 4 +- .../prometheus/process_collector_wasip1.go | 26 + .../prometheus/testutil/promlint/problem.go | 33 + .../prometheus/testutil/promlint/promlint.go | 308 +-- .../testutil/promlint/validation.go | 33 + .../validations/counter_validations.go | 40 + .../validations/generic_name_validations.go | 101 + .../promlint/validations/help_validations.go | 32 + .../validations/histogram_validations.go | 63 + .../testutil/promlint/validations/units.go | 118 ++ .../prometheus/testutil/testutil.go | 15 + .../prometheus/common/model/metadata.go | 28 + .../prometheus/procfs/Makefile.common | 2 +- .../prometheus/procfs/fs_statfs_notype.go | 4 +- .../prometheus/procfs/fs_statfs_type.go | 4 +- .../prometheus/procfs/mountstats.go | 83 +- .../prometheus/procfs/proc_fdinfo.go | 8 +- .../github.com/prometheus/procfs/proc_maps.go | 20 +- .../prometheus/procfs/proc_status.go | 21 +- .../prometheus/prometheus/config/config.go | 12 +- .../prometheus/prometheus/discovery/README.md | 5 + .../prometheus/discovery/discovery.go | 10 + .../prometheus/discovery/dns/dns.go | 72 +- .../prometheus/discovery/file/file.go | 75 +- 
.../prometheus/discovery/manager.go | 75 +- .../prometheus/discovery/metrics.go | 101 + .../discovery/metrics_k8s_client.go | 198 ++ .../prometheus/discovery/refresh/refresh.go | 85 +- .../prometheus/prometheus/discovery/util.go | 72 + .../model/histogram/float_histogram.go | 164 +- .../prometheus/model/histogram/histogram.go | 83 +- .../prometheus/model/metadata/metadata.go | 4 +- .../prometheus/model/relabel/relabel.go | 15 +- .../prometheus/model/textparse/interface.go | 26 +- .../model/textparse/openmetricsparse.go | 30 +- .../prometheus/model/textparse/promparse.go | 23 +- .../model/textparse/protobufparse.go | 53 +- .../prompb/io/prometheus/client/metrics.pb.go | 174 +- .../prompb/io/prometheus/client/metrics.proto | 1 + .../prometheus/prometheus/prompb/types.pb.go | 2 +- .../prometheus/prometheus/prompb/types.proto | 2 +- .../prometheus/prometheus/promql/engine.go | 56 +- .../prometheus/prometheus/promql/functions.go | 20 + .../prometheus/promql/parser/functions.go | 6 + .../prometheus/promql/testdata/functions.test | 8 + .../prometheus/prometheus/scrape/manager.go | 22 +- .../prometheus/prometheus/scrape/metrics.go | 4 +- .../prometheus/prometheus/scrape/scrape.go | 49 +- .../prometheus/prometheus/scrape/target.go | 13 +- .../prometheus/prometheus/storage/buffer.go | 38 +- .../prometheus/prometheus/storage/fanout.go | 14 + .../prometheus/storage/interface.go | 26 + .../prometheus/storage/remote/codec.go | 3 +- .../storage/remote/metadata_watcher.go | 2 +- .../storage/remote/queue_manager.go | 190 +- .../prometheus/storage/remote/write.go | 5 + .../storage/remote/write_handler.go | 2 +- .../prometheus/prometheus/tsdb/block.go | 29 +- .../prometheus/prometheus/tsdb/blockwriter.go | 11 +- .../prometheus/tsdb/chunks/chunks.go | 2 +- .../prometheus/tsdb/chunks/head_chunks.go | 9 +- .../prometheus/prometheus/tsdb/compact.go | 125 +- .../prometheus/prometheus/tsdb/db.go | 192 +- .../prometheus/prometheus/tsdb/exemplar.go | 3 +- .../prometheus/prometheus/tsdb/head.go | 52 +- .../prometheus/prometheus/tsdb/head_append.go | 143 +- .../prometheus/prometheus/tsdb/head_read.go | 15 +- .../prometheus/prometheus/tsdb/head_wal.go | 105 +- .../prometheus/prometheus/tsdb/index/index.go | 49 +- .../prometheus/tsdb/index/postings.go | 125 +- .../prometheus/prometheus/tsdb/querier.go | 40 +- .../prometheus/tsdb/record/record.go | 37 +- .../prometheus/prometheus/tsdb/repair.go | 43 +- .../prometheus/tsdb/tsdbutil/histogram.go | 8 + .../prometheus/prometheus/tsdb/wal.go | 101 +- .../prometheus/tsdb/wlog/watcher.go | 52 +- .../prometheus/prometheus/tsdb/wlog/wlog.go | 4 + .../prometheus/prometheus/web/api/v1/api.go | 40 +- .../promql-engine/engine/distributed.go | 89 + .../thanos-io/promql-engine/engine/engine.go | 380 +--- .../thanos-io/promql-engine/engine/explain.go | 58 + .../thanos-io/promql-engine/engine/sort.go | 106 + .../execution/aggregate/accumulator.go | 156 +- .../execution/aggregate/hashaggregate.go | 36 +- .../execution/aggregate/khashaggregate.go | 85 +- .../execution/aggregate/scalar_table.go | 14 +- .../promql-engine/execution/binary/scalar.go | 36 +- .../promql-engine/execution/binary/vector.go | 44 +- .../execution/exchange/coalesce.go | 33 +- .../execution/exchange/concurrent.go | 28 +- .../promql-engine/execution/exchange/dedup.go | 34 +- .../execution/exchange/duplicate_label.go | 124 ++ .../promql-engine/execution/execution.go | 419 ++-- .../execution/function/absent.go | 44 +- .../execution/function/functions.go | 9 + .../execution/function/histogram.go | 56 +- 
.../promql-engine/execution/function/noarg.go | 28 +- .../execution/function/operator.go | 140 +- .../execution/function/relabel.go | 46 +- .../execution/function/scalar.go | 33 +- .../execution/function/timestamp.go | 91 + .../promql-engine/execution/model/operator.go | 52 +- .../execution/parse/functions.go | 3 + .../execution/remote/operator.go | 35 +- .../promql-engine/execution/scan/functions.go | 140 +- .../execution/scan/literal_selector.go | 26 +- .../execution/scan/matrix_selector.go | 168 +- .../promql-engine/execution/scan/subquery.go | 156 +- .../execution/scan/vector_selector.go | 51 +- .../step_invariant/step_invariant.go | 42 +- .../promql-engine/execution/unary/unary.go | 31 +- .../execution/warnings/context.go | 3 + .../promql-engine/extlabels/labels.go | 24 - .../promql-engine/logicalplan/distribute.go | 185 +- .../logicalplan/distribute_avg.go | 43 +- .../promql-engine/logicalplan/filter.go | 40 - .../logicalplan/merge_selects.go | 19 +- .../promql-engine/logicalplan/passthrough.go | 15 +- .../promql-engine/logicalplan/plan.go | 231 ++- .../logicalplan/propagate_selectors.go | 13 +- .../logicalplan/set_batch_size.go | 17 +- .../logicalplan/sort_matchers.go | 9 +- .../promql-engine/logicalplan/user_defined.go | 3 + .../thanos-io/promql-engine/query/options.go | 3 +- .../promql-engine/ringbuffer/ringbuffer.go | 76 + .../block/indexheader/lazy_binary_reader.go | 7 +- .../pkg/block/indexheader/reader_pool.go | 46 +- .../thanos/pkg/block/metadata/meta.go | 1 + .../thanos/pkg/clientconfig/config.go | 99 + .../thanos-io/thanos/pkg/clientconfig/grpc.go | 8 + .../pkg/{httpconfig => clientconfig}/http.go | 66 +- .../thanos-io/thanos/pkg/compact/compact.go | 12 +- .../thanos/pkg/component/component.go | 1 + .../thanos/pkg/errutil/multierror.go | 29 + .../thanos-io/thanos/pkg/httpconfig/config.go | 75 - .../thanos/pkg/promclient/promclient.go | 20 +- .../thanos-io/thanos/pkg/store/bucket.go | 44 +- .../thanos-io/thanos/pkg/store/prometheus.go | 4 +- .../thanos/pkg/store/storepb/prompb/custom.go | 12 + .../thanos-io/thanos/pkg/tenancy/tenancy.go | 100 +- .../bson/bsoncodec/codec_cache.go | 166 ++ .../bson/bsoncodec/default_value_decoders.go | 22 +- .../bson/bsoncodec/default_value_encoders.go | 56 +- .../bson/bsoncodec/pointer_codec.go | 59 +- .../mongo-driver/bson/bsoncodec/registry.go | 163 +- .../bson/bsoncodec/slice_codec.go | 2 +- .../bson/bsoncodec/struct_codec.go | 105 +- .../mongo-driver/bson/bsoncodec/types.go | 1 + .../mongo-driver/bson/bsonrw/copier.go | 8 +- .../mongo-driver/bson/bsonrw/value_reader.go | 12 +- .../mongo-driver/bson/bsonrw/value_writer.go | 74 +- .../mongo-driver/bson/bsontype/bsontype.go | 12 + .../mongo-driver/bson/marshal.go | 32 +- .../mongo-driver/bson/primitive_codecs.go | 18 +- .../go.mongodb.org/mongo-driver/bson/raw.go | 11 +- .../mongo-driver/bson/raw_value.go | 6 + .../go.mongodb.org/mongo-driver/bson/types.go | 1 + .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 5 +- .../collector/featuregate/README.md | 12 +- .../collector/featuregate/flag.go | 29 +- .../collector/featuregate/gate.go | 28 +- .../collector/featuregate/registry.go | 122 +- .../collector/featuregate/stage.go | 13 +- .../collector/pdata/pcommon/slice.go | 1 - .../pdata/pmetric/generated_exemplarslice.go | 1 - ...generated_exponentialhistogramdatapoint.go | 12 + ...ated_exponentialhistogramdatapointslice.go | 1 - .../generated_histogramdatapointslice.go | 1 - .../pdata/pmetric/generated_metricslice.go | 1 - .../pmetric/generated_numberdatapointslice.go | 1 - 
.../pmetric/generated_resourcemetricsslice.go | 1 - .../pmetric/generated_scopemetricsslice.go | 1 - .../generated_summarydatapointslice.go | 1 - ...ed_summarydatapointvalueatquantileslice.go | 1 - .../collector/pdata/pmetric/json.go | 4 + .../net/http/otelhttp/common.go | 4 +- .../net/http/otelhttp/config.go | 7 +- .../net/http/otelhttp/handler.go | 18 +- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 5 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 17 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 85 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 4 + vendor/go.opentelemetry.io/otel/Makefile | 29 +- vendor/go.opentelemetry.io/otel/README.md | 15 +- .../otel/baggage/baggage.go | 4 +- .../otel/bridge/opentracing/bridge.go | 15 +- .../otel/bridge/opentracing/provider.go | 3 + .../otel/bridge/opentracing/wrapper.go | 11 +- .../otel/exporters/otlp/otlptrace/README.md | 51 - .../otel/exporters/otlp/otlptrace/doc.go | 21 + .../otel/exporters/otlp/otlptrace/exporter.go | 6 +- .../otlp/otlptrace/otlptracegrpc/client.go | 22 +- .../otlp/otlptrace/otlptracegrpc/doc.go | 77 + .../internal/envconfig/envconfig.go | 4 +- .../internal/otlpconfig/options.go | 3 - .../otlp/otlptrace/otlptracegrpc/options.go | 8 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/internal/global/instruments.go | 60 +- .../otel/internal/global/trace.go | 7 + vendor/go.opentelemetry.io/otel/metric/doc.go | 2 +- .../otel/metric/instrument.go | 23 + .../otel/metric/syncfloat64.go | 10 +- .../otel/metric/syncint64.go | 10 +- .../otel/propagation/trace_context.go | 6 +- .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../otel/sdk/resource/auto.go | 10 +- .../otel/sdk/resource/env.go | 10 +- .../otel/sdk/resource/os.go | 7 +- .../otel/sdk/resource/process.go | 36 +- .../otel/sdk/trace/provider.go | 8 +- .../otel/sdk/trace/sampling.go | 4 +- .../otel/sdk/trace/span.go | 11 +- .../otel/sdk/trace/tracer.go | 3 + .../otel/sdk/trace/tracetest/span.go | 1 + .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../otel/semconv/internal/http.go | 338 ++++ .../otel/semconv/v1.12.0/doc.go | 20 + .../otel/semconv/v1.12.0/exception.go | 20 + .../otel/semconv/v1.12.0/http.go | 114 ++ .../otel/semconv/v1.12.0/resource.go | 1042 ++++++++++ .../otel/semconv/v1.12.0/schema.go | 20 + .../otel/semconv/v1.12.0/trace.go | 1704 ++++++++++++++++ .../go.opentelemetry.io/otel/trace/config.go | 1 + vendor/go.opentelemetry.io/otel/trace/doc.go | 64 + .../otel/trace/embedded/embedded.go | 56 + vendor/go.opentelemetry.io/otel/trace/noop.go | 10 +- .../otel/trace/noop/noop.go | 118 ++ .../go.opentelemetry.io/otel/trace/trace.go | 40 +- .../otel/trace/tracestate.go | 38 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 7 +- vendor/go.uber.org/goleak/.golangci.yml | 28 + vendor/go.uber.org/goleak/CHANGELOG.md | 17 +- vendor/go.uber.org/goleak/Makefile | 50 +- vendor/go.uber.org/goleak/README.md | 4 +- vendor/go.uber.org/goleak/glide.yaml | 8 - .../go.uber.org/goleak/internal/stack/scan.go | 56 + .../goleak/internal/stack/stacks.go | 231 ++- vendor/go.uber.org/goleak/leaks.go | 6 + vendor/go.uber.org/goleak/options.go | 28 +- vendor/go.uber.org/goleak/tracestack_new.go | 10 +- vendor/golang.org/x/sys/execabs/execabs.go | 102 - .../golang.org/x/sys/execabs/execabs_go118.go | 17 - .../golang.org/x/sys/execabs/execabs_go119.go | 20 - vendor/golang.org/x/time/rate/rate.go | 2 + .../x/tools/go/packages/external.go | 2 +- 
.../golang.org/x/tools/go/packages/golist.go | 2 +- .../x/tools/go/packages/packages.go | 8 +- .../x/tools/internal/gocommand/invoke.go | 3 +- .../internal/packagesinternal/packages.go | 8 - .../x/tools/internal/versions/gover.go | 172 ++ .../x/tools/internal/versions/types.go | 19 + .../x/tools/internal/versions/types_go121.go | 20 + .../x/tools/internal/versions/types_go122.go | 24 + .../tools/internal/versions/versions_go121.go | 49 + .../tools/internal/versions/versions_go122.go | 38 + .../api/internal/settings.go | 2 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 16 + vendor/google.golang.org/api/option/option.go | 13 + .../api/storage/v1/storage-api.json | 6 +- .../api/storage/v1/storage-gen.go | 4 +- vendor/k8s.io/apimachinery/LICENSE | 202 ++ .../apimachinery/pkg/util/runtime/runtime.go | 177 ++ vendor/k8s.io/client-go/LICENSE | 202 ++ vendor/k8s.io/client-go/tools/metrics/OWNERS | 5 + .../k8s.io/client-go/tools/metrics/metrics.go | 211 ++ .../util/workqueue/default_rate_limiters.go | 238 +++ .../util/workqueue/delaying_queue.go | 325 ++++ vendor/k8s.io/client-go/util/workqueue/doc.go | 26 + .../client-go/util/workqueue/metrics.go | 266 +++ .../client-go/util/workqueue/parallelizer.go | 101 + .../k8s.io/client-go/util/workqueue/queue.go | 328 ++++ .../util/workqueue/rate_limiting_queue.go | 119 ++ vendor/k8s.io/klog/v2/.gitignore | 17 + vendor/k8s.io/klog/v2/.golangci.yaml | 6 + vendor/k8s.io/klog/v2/CONTRIBUTING.md | 22 + vendor/k8s.io/klog/v2/LICENSE | 191 ++ vendor/k8s.io/klog/v2/OWNERS | 14 + vendor/k8s.io/klog/v2/README.md | 118 ++ vendor/k8s.io/klog/v2/RELEASE.md | 9 + vendor/k8s.io/klog/v2/SECURITY.md | 22 + vendor/k8s.io/klog/v2/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/v2/code-of-conduct.md | 3 + vendor/k8s.io/klog/v2/contextual.go | 212 ++ vendor/k8s.io/klog/v2/exit.go | 69 + vendor/k8s.io/klog/v2/format.go | 65 + vendor/k8s.io/klog/v2/imports.go | 38 + .../k8s.io/klog/v2/internal/buffer/buffer.go | 184 ++ .../k8s.io/klog/v2/internal/clock/README.md | 7 + vendor/k8s.io/klog/v2/internal/clock/clock.go | 161 ++ vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 + .../klog/v2/internal/serialize/keyvalues.go | 292 +++ .../internal/serialize/keyvalues_no_slog.go | 97 + .../v2/internal/serialize/keyvalues_slog.go | 155 ++ .../klog/v2/internal/severity/severity.go | 58 + .../internal/sloghandler/sloghandler_slog.go | 96 + vendor/k8s.io/klog/v2/k8s_references.go | 212 ++ vendor/k8s.io/klog/v2/k8s_references_slog.go | 39 + vendor/k8s.io/klog/v2/klog.go | 1729 +++++++++++++++++ vendor/k8s.io/klog/v2/klog_file.go | 130 ++ vendor/k8s.io/klog/v2/klog_file_others.go | 19 + vendor/k8s.io/klog/v2/klog_file_windows.go | 34 + vendor/k8s.io/klog/v2/klogr.go | 105 + vendor/k8s.io/klog/v2/klogr_slog.go | 96 + vendor/k8s.io/utils/LICENSE | 202 ++ vendor/k8s.io/utils/clock/README.md | 4 + vendor/k8s.io/utils/clock/clock.go | 178 ++ vendor/modules.txt | 149 +- 490 files changed, 32967 insertions(+), 5347 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go create mode 100644 vendor/github.com/alecthomas/units/renovate.json5 create mode 100644 vendor/github.com/bboreham/go-loser/.gitignore create mode 100644 vendor/github.com/bboreham/go-loser/LICENSE create mode 100644 vendor/github.com/bboreham/go-loser/README.md create mode 100644 vendor/github.com/bboreham/go-loser/tree.go delete 
mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml create mode 100644 vendor/github.com/fsnotify/fsnotify/.cirrus.yml create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md create mode 100644 vendor/github.com/go-logr/logr/slogr/sloghandler.go create mode 100644 vendor/github.com/go-logr/logr/slogr/slogr.go create mode 100644 vendor/github.com/go-logr/logr/slogr/slogsink.go create mode 100644 vendor/github.com/go-openapi/runtime/client/auth_info.go create mode 100644 vendor/github.com/go-openapi/runtime/client/keepalive.go create mode 100644 vendor/github.com/go-openapi/runtime/client/opentelemetry.go create mode 100644 vendor/github.com/go-openapi/runtime/client/opentracing.go create mode 100644 vendor/github.com/go-openapi/runtime/client/request.go create mode 100644 vendor/github.com/go-openapi/runtime/client/response.go create mode 100644 vendor/github.com/go-openapi/runtime/client/runtime.go create mode 100644 vendor/github.com/go-openapi/runtime/yamlpc/yaml.go create mode 100644 vendor/github.com/hashicorp/consul/api/.copywrite.hcl create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 vendor/github.com/hashicorp/go-version/README.md create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 vendor/github.com/hashicorp/go-version/version.go create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/metalmatze/signal/LICENSE create mode 100644 vendor/github.com/metalmatze/signal/server/signalhttp/instrumentation.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/LICENSE create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/alerts.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/enforce.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/routes.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/rules.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/silences.go create mode 100644 vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/utils.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alert/alert_client.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/alertgroup_client.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/alertmanager_api_client.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/general/general_client.go create mode 100644 
vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/receiver_client.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_parameters.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_responses.go create mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/client/silence/silence_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go create mode 100644 vendor/github.com/prometheus/common/model/metadata.go create mode 100644 vendor/github.com/prometheus/prometheus/discovery/metrics.go create mode 100644 vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go create mode 100644 vendor/github.com/prometheus/prometheus/discovery/util.go create mode 100644 vendor/github.com/thanos-io/promql-engine/engine/distributed.go create mode 100644 vendor/github.com/thanos-io/promql-engine/engine/explain.go create mode 100644 vendor/github.com/thanos-io/promql-engine/engine/sort.go create mode 100644 vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go create mode 100644 vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/logicalplan/filter.go create mode 100644 vendor/github.com/thanos-io/promql-engine/ringbuffer/ringbuffer.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/clientconfig/grpc.go rename 
vendor/github.com/thanos-io/thanos/pkg/{httpconfig => clientconfig}/http.go (85%) delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/httpconfig/config.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go create mode 100644 vendor/go.uber.org/goleak/.golangci.yml delete mode 100644 vendor/go.uber.org/goleak/glide.yaml create mode 100644 vendor/go.uber.org/goleak/internal/stack/scan.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go create mode 100644 vendor/k8s.io/apimachinery/LICENSE create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 vendor/k8s.io/client-go/LICENSE create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/delaying_queue.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/doc.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/metrics.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/parallelizer.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/queue.go create mode 100644 vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go create mode 100644 vendor/k8s.io/klog/v2/.gitignore create mode 100644 vendor/k8s.io/klog/v2/.golangci.yaml create mode 100644 vendor/k8s.io/klog/v2/CONTRIBUTING.md create mode 100644 vendor/k8s.io/klog/v2/LICENSE create mode 100644 vendor/k8s.io/klog/v2/OWNERS create mode 100644 vendor/k8s.io/klog/v2/README.md create mode 100644 vendor/k8s.io/klog/v2/RELEASE.md create mode 100644 vendor/k8s.io/klog/v2/SECURITY.md create mode 100644 vendor/k8s.io/klog/v2/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/v2/code-of-conduct.md create mode 100644 vendor/k8s.io/klog/v2/contextual.go create mode 100644 vendor/k8s.io/klog/v2/exit.go create mode 100644 
vendor/k8s.io/klog/v2/format.go create mode 100644 vendor/k8s.io/klog/v2/imports.go create mode 100644 vendor/k8s.io/klog/v2/internal/buffer/buffer.go create mode 100644 vendor/k8s.io/klog/v2/internal/clock/README.md create mode 100644 vendor/k8s.io/klog/v2/internal/clock/clock.go create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go create mode 100644 vendor/k8s.io/klog/v2/internal/severity/severity.go create mode 100644 vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go create mode 100644 vendor/k8s.io/klog/v2/k8s_references.go create mode 100644 vendor/k8s.io/klog/v2/k8s_references_slog.go create mode 100644 vendor/k8s.io/klog/v2/klog.go create mode 100644 vendor/k8s.io/klog/v2/klog_file.go create mode 100644 vendor/k8s.io/klog/v2/klog_file_others.go create mode 100644 vendor/k8s.io/klog/v2/klog_file_windows.go create mode 100644 vendor/k8s.io/klog/v2/klogr.go create mode 100644 vendor/k8s.io/klog/v2/klogr_slog.go create mode 100644 vendor/k8s.io/utils/LICENSE create mode 100644 vendor/k8s.io/utils/clock/README.md create mode 100644 vendor/k8s.io/utils/clock/clock.go diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 7221f45b15..c900e7a5f4 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -11,7 +11,7 @@ jobs: lint: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 + image: quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -40,7 +40,7 @@ jobs: test: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 + image: quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -59,7 +59,7 @@ jobs: build: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 + image: quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -190,14 +190,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 + image: quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -239,7 +239,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:update-go-1.21.3-e38685e50 + image: quay.io/cortexproject/build-image:upgrade-golangci-lint-2f73458a1 steps: - name: Checkout Repo uses: actions/checkout@v2 diff --git a/.golangci.yml b/.golangci.yml index c5c0da324e..77d8f8cab6 100644 --- 
a/.golangci.yml +++ b/.golangci.yml @@ -19,12 +19,18 @@ linters-settings: exclude: ./.errcheck-exclude goimports: local-prefixes: "github.com/cortexproject/cortex" + revive: + severity: error # We only want critical issues. depguard: - list-type: blacklist - include-go-root: true - packages-with-error-message: - - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + rules: + main: + list-mode: lax + files: + - $all + deny: + - pkg: "github.com/go-kit/kit/log" + desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" run: timeout: 5m diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 1cee501bfc..2c3e7a90d6 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -23,7 +23,7 @@ RUN GOARCH=$(go env GOARCH) && \ chmod +x shfmt && \ mv shfmt /usr/bin -RUN curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b /usr/bin v1.51.2 +RUN curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b /usr/bin v1.54.1 ENV HUGO_VERSION=v0.101.0 RUN go install github.com/client9/misspell/cmd/misspell@v0.3.4 &&\ diff --git a/go.mod b/go.mod index e1c64662ee..518e7e7ee2 100644 --- a/go.mod +++ b/go.mod @@ -4,19 +4,19 @@ go 1.21 require ( github.com/Masterminds/squirrel v1.5.4 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 github.com/alicebob/miniredis/v2 v2.30.4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.45.25 + github.com/aws/aws-sdk-go v1.48.14 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 - github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52 + github.com/cortexproject/promqlsmith v0.0.0-20240115062247-9ed0dfba956d github.com/dustin/go-humanize v1.0.1 github.com/efficientgo/core v1.0.0-rc.2 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/felixge/fgprof v0.9.3 github.com/go-kit/log v0.2.1 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.9 github.com/go-openapi/swag v0.22.4 github.com/go-redis/redis/v8 v8.11.5 github.com/gogo/protobuf v1.3.2 @@ -27,12 +27,12 @@ require ( github.com/gorilla/mux v1.8.1 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.25.1 + github.com/hashicorp/consul/api v1.26.1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-sockaddr v1.0.6 github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.3 + github.com/klauspost/compress v1.17.4 github.com/lib/pq v1.10.9 github.com/minio/minio-go/v7 v7.0.63 github.com/mitchellh/go-wordwrap v1.0.1 @@ -42,34 +42,34 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.26.0 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.0 + github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 // Prometheus maps version 2.x.y to tags v0.x.y.
- github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11 + github.com/prometheus/prometheus v0.48.1-0.20240115084306-17920623e7cd github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.5 github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d - github.com/thanos-io/promql-engine v0.0.0-20231127105941-257543af55e8 - github.com/thanos-io/thanos v0.32.5-0.20231212162152-fc1a6edf7acc + github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856 + github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.11 go.etcd.io/etcd/client/pkg/v3 v3.5.9 go.etcd.io/etcd/client/v3 v3.5.7 go.opentelemetry.io/contrib/propagators/aws v1.17.0 - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/bridge/opentracing v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/bridge/opentracing v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/atomic v1.11.0 - golang.org/x/net v0.18.0 + golang.org/x/net v0.19.0 golang.org/x/sync v0.5.0 - golang.org/x/time v0.4.0 + golang.org/x/time v0.5.0 google.golang.org/grpc v1.59.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -80,7 +80,7 @@ require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/cespare/xxhash/v2 v2.2.0 github.com/google/go-cmp v0.6.0 - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb google.golang.org/protobuf v1.31.0 ) @@ -90,9 +90,9 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.5 // indirect cloud.google.com/go/storage v1.35.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect @@ -111,6 +111,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect github.com/aws/smithy-go v1.13.3 // indirect + github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -124,10 +125,10 @@ require ( github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd // indirect github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify 
v1.7.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.4 // indirect @@ -142,7 +143,7 @@ require ( github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect + github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.4.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect @@ -155,6 +156,7 @@ require ( github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/serf v0.10.1 // indirect @@ -172,7 +174,8 @@ require ( github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/miekg/dns v1.1.56 // indirect + github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect + github.com/miekg/dns v1.1.57 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -185,9 +188,10 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus-community/prom-label-proxy v0.7.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.10.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/redis/rueidis v1.0.14-go1.18 // indirect github.com/rs/cors v1.9.0 // indirect github.com/rs/xid v1.5.0 // indirect @@ -202,39 +206,43 @@ require ( github.com/weaveworks/promrus v1.2.0 // indirect github.com/yuin/gopher-lua v1.1.0 // indirect github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/featuregate v0.77.0 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect - go.opentelemetry.io/collector/semconv v0.88.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/collector/featuregate v1.0.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0 // indirect + go.opentelemetry.io/collector/semconv v0.90.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.13.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect + 
go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/goleak v1.2.1 // indirect + go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/crypto v0.17.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.14.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.15.0 // indirect + golang.org/x/tools v0.16.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gonum.org/v1/gonum v0.12.0 // indirect - google.golang.org/api v0.150.0 // indirect + google.golang.org/api v0.153.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/telebot.v3 v3.1.3 // indirect + k8s.io/apimachinery v0.28.4 // indirect + k8s.io/client-go v0.28.4 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect ) // Override since git.apache.org is down. The docs say to fetch from github. 
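The change this PR is named for lives in the new vendored package github.com/bboreham/go-loser (tree.go above), which Prometheus now uses to k-way-merge sorted postings lists (see the tsdb/index/postings.go changes in the diffstat). A loser tree is a tournament tree over k sorted inputs: each internal node remembers the loser of its match and the root holds the overall winner, so consuming the minimum replays only the single leaf-to-root path of the winning input, costing O(log k) per element instead of O(k) for scanning all k list heads. The sketch below is a minimal, self-contained illustration of that idea; it is not the go-loser API, and every name in it (cursor, loserTree, pop, sentinel) is invented for this example.

// Minimal loser-tree sketch (illustrative only; NOT the go-loser API).
package main

import "fmt"

const sentinel = ^uint64(0) // acts as +infinity; assumes real data never uses it

// cursor walks one sorted postings list.
type cursor struct {
	list []uint64
	pos  int
}

func (c *cursor) value() uint64 {
	if c.pos >= len(c.list) {
		return sentinel
	}
	return c.list[c.pos]
}

// loserTree holds k cursors (k padded up to a power of two). tree[1..k-1]
// store the losing cursor index of each internal match; tree[0] holds the
// overall winner.
type loserTree struct {
	cursors []*cursor
	tree    []int
	k       int
}

func newLoserTree(lists [][]uint64) *loserTree {
	k := 1
	for k < len(lists) {
		k <<= 1
	}
	cs := make([]*cursor, k)
	for i := range cs {
		cs[i] = &cursor{} // padding cursors stay empty and always yield sentinel
		if i < len(lists) {
			cs[i].list = lists[i]
		}
	}
	t := &loserTree{cursors: cs, tree: make([]int, 2*k), k: k}
	// Play every match once, bottom-up: winners propagate, losers stay put.
	winners := make([]int, 2*k)
	for i := 0; i < k; i++ {
		winners[k+i] = i // leaf k+i corresponds to cursor i
	}
	for n := k - 1; n >= 1; n-- {
		a, b := winners[2*n], winners[2*n+1]
		if cs[a].value() <= cs[b].value() {
			winners[n], t.tree[n] = a, b
		} else {
			winners[n], t.tree[n] = b, a
		}
	}
	t.tree[0] = winners[1]
	return t
}

// pop returns the smallest current element across all lists and advances
// its cursor, replaying matches only on that cursor's path to the root.
func (t *loserTree) pop() (uint64, bool) {
	w := t.tree[0]
	v := t.cursors[w].value()
	if v == sentinel {
		return 0, false // every list is exhausted
	}
	t.cursors[w].pos++
	for n := (t.k + w) / 2; n >= 1; n /= 2 {
		if t.cursors[t.tree[n]].value() < t.cursors[w].value() {
			t.tree[n], w = w, t.tree[n] // stored loser beats the candidate; swap
		}
	}
	t.tree[0] = w
	return v, true
}

func main() {
	// Three sorted "postings" lists of series references.
	lt := newLoserTree([][]uint64{{1, 4, 9}, {2, 4, 7}, {3, 8}})
	for v, ok := lt.pop(); ok; v, ok = lt.pop() {
		fmt.Printf("%d ", v) // 1 2 3 4 4 7 8 9
	}
	fmt.Println()
}

Prometheus's merged postings additionally skip duplicate series references that occur in more than one list; the sketch above leaves duplicates (the two 4s) to the caller.
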
diff --git a/go.sum b/go.sum index f32254aaee..9f853588ea 100644 --- a/go.sum +++ b/go.sum @@ -521,17 +521,16 @@ cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcP dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 h1:qgs/VAMSR+9qFhwTw4OwF2NbVuw+2m83pVZJjqkKQMw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= @@ -542,6 +541,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/DATA-DOG/go-sqlmock 
v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= @@ -569,8 +569,9 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= @@ -596,8 +597,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= -github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.48.14 h1:nVLrp+F84SG+xGiFMfe1TE6ZV6smF+42tuuNgYGV30s= +github.com/aws/aws-sdk-go v1.48.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= @@ -633,6 +634,8 @@ github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA= github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4= github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -686,8 +689,8 @@ github.com/coreos/go-systemd/v22 
v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52 h1:soTzgUC/F+qtsMbh/IWr3uGwPaXXYEUsTT3zYiXP0yM= -github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52/go.mod h1:8LOFLrqqVfNalbgjKZYdh6Bv/VXLdOV799aJwZJJGOs= +github.com/cortexproject/promqlsmith v0.0.0-20240115062247-9ed0dfba956d h1:VSA/aQtOyhIuxnL3YsI7/hdYhf1rWdmiJOcVPTO4dVg= +github.com/cortexproject/promqlsmith v0.0.0-20240115062247-9ed0dfba956d/go.mod h1:by/B++NlNa8Hp+d2054cy3YR4ijhPNGnuWjUWOUppms= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -699,14 +702,14 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw= github.com/dhui/dktest v0.3.16/go.mod h1:gYaA3LRmM8Z4vJl2MA0THIigJoZrwOansEOsp+kqxp0= -github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= -github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= +github.com/digitalocean/godo v1.106.0 h1:m5iErwl3xHovGFlawd50n54ntgXHt1BLsvU6BXsVxEU= +github.com/digitalocean/godo v1.106.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= -github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -755,14 +758,15 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= @@ -787,8 +791,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -817,8 +821,8 @@ github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -983,8 +987,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 
h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -1014,8 +1018,8 @@ github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= @@ -1038,11 +1042,11 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= +github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= -github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= +github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -1145,8 +1149,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= @@ -1179,8 +1183,8 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/linode/linodego v1.24.0 h1:zO+bMdTE6wPccqP7QIkbxAfACX7DjSX6DW9JE/qOKDQ= -github.com/linode/linodego v1.24.0/go.mod h1:cq/ty5BCEQnsO6OjMqD7Q03KCCyB8CNM5E3MNg0LV6M= +github.com/linode/linodego v1.25.0 h1:zYMz0lTasD503jBu3tSRhzEmXHQN1zptCw5o71ibyyU= +github.com/linode/linodego v1.25.0/go.mod h1:BMZI0pMM/YGjBis7pIXDPbcgYfCZLH0/UvzqtsGtG1c= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= @@ -1216,11 +1220,13 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -1315,11 +1321,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete 
v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus-community/prom-label-proxy v0.7.0 h1:1iNHXF7V8z2iOCinEyxKDUHu2jppPAAd6PmBCi3naok= +github.com/prometheus-community/prom-label-proxy v0.7.0/go.mod h1:wR9C/Mwp5aBbiqM6gQ+FZdFRwL8pCzzhsje8lTAx/aA= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -1327,8 +1336,8 @@ github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrb github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1344,8 +1353,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 h1:V5ajRiLiCQGO+ggTr+07gMUcTqlIMMkDBfrJe5zKLmc= +github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1359,10 +1368,10 @@ github.com/prometheus/procfs 
v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11 h1:3wogiAxGft1+Syu99dswxhbWpR4IjLFKQgKxU84wrag= -github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11/go.mod h1:hCcxMXhfC04Ua9hQi2j43+EGMQhrRNvH8x0LteAnF1I= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.48.1-0.20240115084306-17920623e7cd h1:qq+8W4nYaar4QxY6JtYYVfUfO4AVNQcJ53Jaz3UZCMU= +github.com/prometheus/prometheus v0.48.1-0.20240115084306-17920623e7cd/go.mod h1:NCkyb7Sg5TsJxi6Hx9rCcAXZ2E1Bu7x0dYOQ0jLTfDU= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1444,10 +1453,10 @@ github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d h1:rTz1FIWYPS4R7H/hdZPaVRfevHdfv7BOjJ2FQG747KQ= github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d/go.mod h1:RMvJQnpB4QQiYGg1gF8mnPJg6IkIPY28Buh8f6b+F0c= -github.com/thanos-io/promql-engine v0.0.0-20231127105941-257543af55e8 h1:KX57eKPq3yzX7ENyd0+fOfPb6v9tjOCLRT8/9waWs/w= -github.com/thanos-io/promql-engine v0.0.0-20231127105941-257543af55e8/go.mod h1:vfXJv1JXNdLfHnjsHsLLJl5tyI7KblF76Wo5lZ9YC4Q= -github.com/thanos-io/thanos v0.32.5-0.20231212162152-fc1a6edf7acc h1:X4/ZoRAXG4n+15WxNJ3uwju0gKoPCzzyQwtKDtUOov0= -github.com/thanos-io/thanos v0.32.5-0.20231212162152-fc1a6edf7acc/go.mod h1:tADvTBUwVH/yjWymDztTV8/bErQVIGFlHIZqXc07QQo= +github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856 h1:4PxgeNqMIWScmt8+oOjG6puF8Dhn2rPGOFBwuIf24SI= +github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856/go.mod h1:3pmodeI6v0zeezI1m9dE0ZaUXqiNSceZj1ZrQIXvHE4= +github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d h1:Y6Jmemwwascqa7nxSKjYF+yaSDtOVmYZ/cENTZAteuc= +github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d/go.mod h1:+trR/JNyXlLqPkJWMUm1pAnniJHCH3+jpLCZ0i9y6fo= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1499,8 +1508,8 @@ go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/ go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver 
v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= -go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1510,14 +1519,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU= -go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= -go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= -go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= +go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/semconv v0.90.1 h1:2fkQZbefQBbIcNb9Rk1mRcWlFZgQOk7CpST1e1BK8eg= +go.opentelemetry.io/collector/semconv v0.90.1/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= go.opentelemetry.io/contrib/propagators/aws v1.17.0 h1:IX8d7l2uRw61BlmZBOTQFaK+y22j6vytMVTs9wFrO+c= @@ -1528,20 +1537,20 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 h1:+tVlvpiQMOCzi4EYCaBjbl go.opentelemetry.io/contrib/propagators/jaeger v1.13.0/go.mod h1:Qf7eVCLYawiNIB+A81kk8aFDFwYqXSqmt0N2RcvkLLI= go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYohdaBacQfojPYZJgATQ= go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod 
h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/bridge/opentracing v1.19.0 h1:HCvsUi6uuhat/nAuxCl41A+OPxXXPxMNTRxKZx7hTW4= -go.opentelemetry.io/otel/bridge/opentracing v1.19.0/go.mod h1:n46h+7L/lcSuHhpqJQiUdb4eux19NNxTuWJ/ZMnIQMg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/bridge/opentracing v1.21.0 h1:7AfuSFhyvBmt/0YskcdxDyTdHPjQfrHcZQo6Zu5srF4= +go.opentelemetry.io/otel/bridge/opentracing v1.21.0/go.mod h1:giUOMajCV30LvlPHnzRDNBvDV3/NmrGVrqCp/1suDok= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1554,8 +1563,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -1603,8 +1612,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1707,15 +1716,14 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1745,8 +1753,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= -golang.org/x/oauth2 
v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1865,7 +1873,6 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1875,7 +1882,6 @@ golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= @@ -1905,8 +1911,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= -golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1979,8 +1985,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= 
+golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2058,8 +2064,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= -google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= +google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2199,12 +2205,12 @@ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2306,15 +2312,14 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= diff --git a/integration/backward_compatibility_test.go b/integration/backward_compatibility_test.go index 50194c8170..2ae0b07ad8 100644 --- a/integration/backward_compatibility_test.go +++ b/integration/backward_compatibility_test.go @@ -33,14 +33,6 @@ var ( } ) -func preCortex110Flags(flags map[string]string) map[string]string { - return e2e.MergeFlagsWithoutRemovingEmpty(flags, map[string]string{ - // Store-gateway "wait ring stability" has been introduced in 1.10.0 - "-store-gateway.sharding-ring.wait-stability-min-duration": "", - "-store-gateway.sharding-ring.wait-stability-max-duration": "", - }) -} - func TestBackwardCompatibilityWithBlocksStorage(t *testing.T) { for previousImage, flagsFn := range previousVersionImages { t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) { diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 79e446face..c7deb4f7ab 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -14,7 +14,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/cortexproject/cortex/pkg/util" ) @@ -158,26 +157,26 @@ func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // MetricMetadataMetricTypeToMetricType converts a metric type from our internal client // to a Prometheus one. 
-func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) textparse.MetricType { +func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) model.MetricType { switch mt { case UNKNOWN: - return textparse.MetricTypeUnknown + return model.MetricTypeUnknown case COUNTER: - return textparse.MetricTypeCounter + return model.MetricTypeCounter case GAUGE: - return textparse.MetricTypeGauge + return model.MetricTypeGauge case HISTOGRAM: - return textparse.MetricTypeHistogram + return model.MetricTypeHistogram case GAUGEHISTOGRAM: - return textparse.MetricTypeGaugeHistogram + return model.MetricTypeGaugeHistogram case SUMMARY: - return textparse.MetricTypeSummary + return model.MetricTypeSummary case INFO: - return textparse.MetricTypeInfo + return model.MetricTypeInfo case STATESET: - return textparse.MetricTypeStateset + return model.MetricTypeStateset default: - return textparse.MetricTypeUnknown + return model.MetricTypeUnknown } } diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index fed0870ba7..6fda91a84e 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -7,8 +7,8 @@ import ( "unsafe" jsoniter "github.com/json-iterator/go" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -75,22 +75,22 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { tc := []struct { desc string input MetricMetadata_MetricType - expected textparse.MetricType + expected model.MetricType }{ { desc: "with a single-word metric", input: COUNTER, - expected: textparse.MetricTypeCounter, + expected: model.MetricTypeCounter, }, { desc: "with a two-word metric", input: STATESET, - expected: textparse.MetricTypeStateset, + expected: model.MetricTypeStateset, }, { desc: "with an unknown metric", input: MetricMetadata_MetricType(100), - expected: textparse.MetricTypeUnknown, + expected: model.MetricTypeUnknown, }, } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 938584eba3..30e4d799d5 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -555,9 +555,7 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri histograms = make([]cortexpb.Histogram, 0, len(ts.Histograms)) // TODO(yeya24): we need to have validations for native histograms // at some point. Skip validations for now. - for _, h := range ts.Histograms { - histograms = append(histograms, h) - } + histograms = append(histograms, ts.Histograms...) 
} return cortexpb.PreallocTimeseries{ diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go index 650d93f727..a60e6761f5 100644 --- a/pkg/querier/testutils.go +++ b/pkg/querier/testutils.go @@ -2,16 +2,17 @@ package querier import ( "context" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/mock" + "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/prom1/storage/metric" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/validation" ) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index d8170e44e2..c5c289aae9 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -72,6 +72,11 @@ func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v return 0, nil } +func (a *PusherAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is a no-op for PusherAppender as it happens during scrape time only. + return 0, nil +} + func (a *PusherAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { return 0, errors.New("exemplars are unsupported") } diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index 4e676acee0..87a40f869c 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -235,7 +235,7 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string, userManag _ = ot.GlobalTracer().Inject(sp.Context(), ot.HTTPHeaders, ot.HTTPHeadersCarrier(req.Header)) return ctxhttp.Do(ctx, client, req) }, - }, log.With(r.logger, "user", userID)) + }, log.With(r.logger, "user", userID), userManagerRegistry) n.run() diff --git a/pkg/ruler/notifier.go b/pkg/ruler/notifier.go index b8fa7536c4..e14236321c 100644 --- a/pkg/ruler/notifier.go +++ b/pkg/ruler/notifier.go @@ -11,6 +11,7 @@ import ( gklog "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" @@ -43,12 +44,12 @@ type rulerNotifier struct { logger gklog.Logger } -func newRulerNotifier(o *notifier.Options, l gklog.Logger) *rulerNotifier { +func newRulerNotifier(o *notifier.Options, l gklog.Logger, registerer prometheus.Registerer) *rulerNotifier { sdCtx, sdCancel := context.WithCancel(context.Background()) return &rulerNotifier{ notifier: notifier.NewManager(o, l), sdCancel: sdCancel, - sdManager: discovery.NewManager(sdCtx, l), + sdManager: discovery.NewManager(sdCtx, l, registerer), logger: l, } } diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go index 5a43ec8660..bd25f15dcf 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go @@ -53,7 +53,8 @@ func TestBlockIDsFetcher_Fetch(t *testing.T) { blockIds = append(blockIds, id) } }() - blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) + _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) + require.NoError(t, err) close(ch) wg.Wait() require.Equal(t, []ulid.ULID{block3.ID}, blockIds) @@ 
-104,7 +105,8 @@ func TestBlockIDsFetcherFetcher_Fetch_NoBucketIndex(t *testing.T) { blockIds = append(blockIds, id) } }() - blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) + _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) + require.NoError(t, err) close(ch) wg.Wait() require.Equal(t, []ulid.ULID{block1.ID, block2.ID, block3.ID}, blockIds) diff --git a/pkg/storage/tsdb/inmemory_index_cache_test.go b/pkg/storage/tsdb/inmemory_index_cache_test.go index aba9099a54..530d26575c 100644 --- a/pkg/storage/tsdb/inmemory_index_cache_test.go +++ b/pkg/storage/tsdb/inmemory_index_cache_test.go @@ -4,6 +4,12 @@ import ( "bytes" "context" "fmt" + "math/rand" + "strconv" + "strings" + "testing" + "time" + "github.com/efficientgo/core/testutil" "github.com/go-kit/log" "github.com/oklog/ulid" @@ -14,11 +20,6 @@ import ( "github.com/stretchr/testify/require" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/tenancy" - "math/rand" - "strconv" - "strings" - "testing" - "time" ) func TestInMemoryIndexCache_UpdateItem(t *testing.T) { diff --git a/pkg/storage/tsdb/users_scanner.go b/pkg/storage/tsdb/users_scanner.go index 2c0d463d25..0fe24395bf 100644 --- a/pkg/storage/tsdb/users_scanner.go +++ b/pkg/storage/tsdb/users_scanner.go @@ -4,10 +4,11 @@ import ( "context" "strings" - "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/thanos-io/objstore" + + "github.com/cortexproject/cortex/pkg/util" ) // AllUsers returns true to each call and should be used whenever the UsersScanner should not filter out diff --git a/pkg/tracing/migration/bridge_wrapper_test.go b/pkg/tracing/migration/bridge_wrapper_test.go index 24afb1409f..ea3375958c 100644 --- a/pkg/tracing/migration/bridge_wrapper_test.go +++ b/pkg/tracing/migration/bridge_wrapper_test.go @@ -9,6 +9,7 @@ import ( "github.com/weaveworks/common/httpgrpc" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" ) @@ -18,7 +19,7 @@ var ( spanID trace.SpanID = [8]byte{byte(11)} traceIDKey = "Traceid" spanIDKey = "Spanid" - noopTracer = trace.NewNoopTracerProvider().Tracer("") + noopTracer = noop.NewTracerProvider().Tracer("") noopSpan = func() trace.Span { _, s := noopTracer.Start(context.Background(), "") return s diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index b618676c59..aa30abf373 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,13 +1,43 @@ # Release History +## 1.9.0 (2023-11-06) + +### Breaking Changes +> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` +* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. +* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. + +### Bugs Fixed + +* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. +* Include error text instead of error type in traces when the transport returns an error. +* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. +* Block key and SAS authentication for non TLS protected endpoints. 
+* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. +* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. +* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. +* Context values created by `azcore` will no longer flow across disjoint HTTP requests. + +### Other Changes + +* Skip generating trace info for no-op tracers. +* The `clientName` parameter in client constructors has been renamed to `moduleName`. + +## 1.9.0-beta.1 (2023-10-05) + +### Other Changes + +* The beta features for tracing and fakes have been reinstated. + ## 1.8.0 (2023-10-05) ### Features Added -* Added `Claims` and `EnableCAE` fields to `policy.TokenRequestOptions`. -* ARM bearer token policy handles CAE challenges. -* `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) -* Added functions `FetcherForNextLink` and `EncodeQueryParams` along with `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. +* This includes the following features from `v1.8.0-beta.N` releases. * Claims and CAE for authentication. * New `messaging` package. * Various helpers in the `runtime` package. * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. * Added types `KeyCredential` and `SASCredential` to the `azcore` package. * Includes their respective constructor functions. * Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. @@ -24,11 +54,24 @@ ### Other Changes +* Updated dependencies. + +## 1.8.0-beta.3 (2023-09-07) + +### Features Added + +* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. + +### Bugs Fixed + +* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. + +### Other Changes + * The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. * `WithCaptureResponse` * `WithHTTPHeader` * `WithRetryOptions` -* Updated dependencies. ## 1.7.2 (2023-09-06) ### Bugs Fixed * Fix default HTTP transport to work in WASM modules. +## 1.8.0-beta.2 (2023-08-14) + +### Features Added + +* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. +* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. + +### Breaking Changes + +> This change affects only code written against beta version `v1.8.0-beta.1`. +* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. + +> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. +* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. + +### Bugs Fixed + +* Propagate any query parameters when constructing a fake poller and/or injecting next links. + ## 1.7.1 (2023-08-14) ## Bugs Fixed * Enable TLS renegotiation in the default transport policy.
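As an aside on the `KeyCredential`/`SASCredential` additions noted above, here is a minimal sketch of constructing and rotating a key credential. Only the types and their constructors are guaranteed by this changelog; the `Update` method used for rotation reflects the released `azcore` surface and should be read as an assumption, not as part of this patch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

func main() {
	// NewKeyCredential wraps a single shared service key; the changelog pairs
	// it with KeyCredentialPolicy in azcore/runtime for pipeline auth.
	cred := azcore.NewKeyCredential("initial-service-key")

	// Update (assumed API) swaps the key in place, so clients built on this
	// credential pick up the rotated value without being reconstructed.
	cred.Update("rotated-service-key")

	fmt.Println("key credential rotated")
}
```

`SASCredential` follows the same pattern via its own constructor; per the changelog entry above, both ship with matching pipeline policies in `azcore/runtime`.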
+## 1.8.0-beta.1 (2023-07-12) + +### Features Added + +- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) + +### Other Changes + +* The beta features for CAE, tracing, and fakes have been reinstated. + ## 1.7.0 (2023-07-12) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 9f051ba4ae..8eef8633a7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -84,7 +84,9 @@ func IsNullValue[T any](v T) bool { return false } -// ClientOptions contains configuration settings for a client's pipeline. +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. type ClientOptions = policy.ClientOptions // Client is a basic HTTP client. It consists of a pipeline and tracing provider. @@ -93,22 +95,17 @@ type Client struct { tr tracing.Tracer // cached on the client to support shallow copying with new values - tp tracing.Provider - modVer string + tp tracing.Provider + modVer string + namespace string } // NewClient creates a new Client instance with the provided values. -// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider. -// if module and package are the same value, the "module/" prefix can be omitted. -// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. 
// - plOpts - pipeline configuration options; can be the zero-value // - options - optional client configurations; pass nil to accept the default values -func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { - mod, client, err := shared.ExtractModuleName(clientName) - if err != nil { - return nil, err - } - +func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { if options == nil { options = &ClientOptions{} } @@ -119,15 +116,19 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, } } - pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options) + pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) - tr := options.TracingProvider.NewTracer(client, moduleVersion) + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + if tr.Enabled() && plOpts.Tracing.Namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) + } return &Client{ - pl: pl, - tr: tr, - tp: options.TracingProvider, - modVer: moduleVersion, + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + namespace: plOpts.Tracing.Namespace, }, nil } @@ -146,5 +147,8 @@ func (c *Client) Tracer() tracing.Tracer { // - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans func (c *Client) WithClientName(clientName string) *Client { tr := c.tp.NewTracer(clientName, c.modVer) - return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer} + if tr.Enabled() && c.namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) + } + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go index 28c64678c7..654a5f4043 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -253,5 +253,12 @@ When resuming a poller, no IO is performed, and zero-value arguments can be used Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO BeginA() will result in an error. + +# Fakes + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. */ package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index e793b31db2..f2b296b6dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -8,6 +8,8 @@ package exported import ( "context" + "encoding/base64" + "fmt" "io" "net/http" "sync/atomic" @@ -78,6 +80,38 @@ type TokenCredential interface { GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) } +// DecodeByteArray will base-64 decode the provided string into v. 
+// Exported as runtime.DecodeByteArray()
+func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
+	if len(s) == 0 {
+		return nil
+	}
+	payload := string(s)
+	if payload[0] == '"' {
+		// remove surrounding quotes
+		payload = payload[1 : len(payload)-1]
+	}
+	switch format {
+	case Base64StdFormat:
+		decoded, err := base64.StdEncoding.DecodeString(payload)
+		if err == nil {
+			*v = decoded
+			return nil
+		}
+		return err
+	case Base64URLFormat:
+		// use raw encoding as URL format should not contain any '=' characters
+		decoded, err := base64.RawURLEncoding.DecodeString(payload)
+		if err == nil {
+			*v = decoded
+			return nil
+		}
+		return err
+	default:
+		return fmt.Errorf("unrecognized byte array format: %d", format)
+	}
+}
+
 // KeyCredential contains an authentication key used to authenticate to an Azure service.
 // Exported as azcore.KeyCredential.
 type KeyCredential struct {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
index c44efd6eff..e45f831ed2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
@@ -8,10 +8,7 @@ package exported
 
 import (
 	"errors"
-	"fmt"
 	"net/http"
-
-	"golang.org/x/net/http/httpguts"
 )
 
 // Policy represents an extensibility point for the Pipeline that can mutate the specified
@@ -75,23 +72,6 @@ func (p Pipeline) Do(req *Request) (*http.Response, error) {
 	if req == nil {
 		return nil, errors.New("request cannot be nil")
 	}
-	// check copied from Transport.roundTrip()
-	for k, vv := range req.Raw().Header {
-		if !httpguts.ValidHeaderFieldName(k) {
-			if req.Raw().Body != nil {
-				req.Raw().Body.Close()
-			}
-			return nil, fmt.Errorf("invalid header field name %q", k)
-		}
-		for _, v := range vv {
-			if !httpguts.ValidHeaderFieldValue(v) {
-				if req.Raw().Body != nil {
-					req.Raw().Body.Close()
-				}
-				return nil, fmt.Errorf("invalid header field value %q for key %v", v, k)
-			}
-		}
-	}
 	req.policies = p.policies
 	return req.Next()
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index fa99d1b7ed..659f2a7d2e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -8,6 +8,7 @@ package exported
 
 import (
 	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -18,6 +19,28 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
 )
 
+// Base64Encoding is used to specify which base-64 encoder/decoder to use when
+// encoding/decoding a slice of bytes to/from a string.
+// Exported as runtime.Base64Encoding
+type Base64Encoding int
+
+const (
+	// Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
+	Base64StdFormat Base64Encoding = 0
+
+	// Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
+	Base64URLFormat Base64Encoding = 1
+)
+
+// EncodeByteArray will base-64 encode the byte slice v.
+// Exported as runtime.EncodeByteArray() +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + // Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. // Don't use this type directly, use NewRequest() instead. // Exported as policy.Request. @@ -170,6 +193,14 @@ func (req *Request) Clone(ctx context.Context) *Request { return &r2 } +// WithContext returns a shallow copy of the request with its context changed to ctx. +func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + // not exported but dependent on Request // PolicyFunc is a type that implements the Policy interface. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 7df2f88c1c..f243552885 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -25,7 +26,7 @@ func NewResponseError(resp *http.Response) error { } // prefer the error code in the response header - if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { respErr.ErrorCode = ec return respErr } @@ -112,33 +113,45 @@ type ResponseError struct { // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
func (e *ResponseError) Error() string { + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.RawResponse != nil { + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, separator) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + } else { + fmt.Fprintln(msg, "Missing RawResponse") + fmt.Fprintln(msg, separator) + } if e.ErrorCode != "" { fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) } else { fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - body, err := exported.Payload(e.RawResponse, nil) - if err != nil { - // this really shouldn't fail at this point as the response - // body is already cached (it was read in NewResponseError) - fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - if err := json.Indent(msg, body, "", " "); err != nil { - // failed to pretty-print so just dump it verbatim - fmt.Fprint(msg, string(body)) + if e.RawResponse != nil { + fmt.Fprintln(msg, separator) + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") } - // the standard library doesn't have a pretty-printer for XML - fmt.Fprintln(msg) - } else { - fmt.Fprintln(msg, "Response contained no body") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintln(msg, separator) return msg.String() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go new file mode 100644 index 0000000000..2598347186 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// lroStatusURLSuffix is the URL path suffix for a faked LRO. +const lroStatusURLSuffix = "/get/fake/status" + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + qp := "" + if resp.Request.URL.RawQuery != "" { + qp = "?" + resp.Request.URL.RawQuery + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() + FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) +} + +// SanitizePollerPath removes any fake-appended suffix from a URL's path. +func SanitizePollerPath(path string) string { + return strings.TrimSuffix(path, lroStatusURLSuffix) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 05e53aa76e..272f06155e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -7,8 +7,9 @@ package shared const ( - ContentTypeAppJSON = "application/json" - ContentTypeAppXML = "application/xml" + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" + ContentTypeTextPlain = "text/plain" ) const ( @@ -17,20 +18,25 @@ const ( HeaderAzureAsync = "Azure-AsyncOperation" HeaderContentLength = "Content-Length" HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" ) const BearerTokenPrefix = "Bearer " +const TracingNamespaceAttrName = "az.namespace" + const ( // Module is the name of the calling module used in telemetry data. Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.8.0" + Version = "v1.9.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index 1bf3aca918..16bc105f48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -14,10 +14,11 @@ import ( "regexp" "strconv" "time" - - "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" ) +// NOTE: when adding a new context key type, it likely needs to be +// added to the deny-list of key types in ContextWithDeniedValues + // CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. type CtxWithHTTPHeaderKey struct{} @@ -27,6 +28,12 @@ type CtxWithRetryOptionsKey struct{} // CtxWithCaptureResponse is used as a context key for retrieving the raw response. 
type CtxWithCaptureResponse struct{} +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} + // Delay waits for the duration to elapse or the context to be cancelled. func Delay(ctx context.Context, delay time.Duration) error { select { @@ -80,49 +87,21 @@ func ValidateModVer(moduleVersion string) error { return nil } -// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or -// "package", "package.Client" from "package.Client" when there's no "module/" prefix. -// If clientName is malformed, an error is returned. -func ExtractModuleName(clientName string) (string, string, error) { - // uses unnamed capturing for "module", "package.Client", and "package" - regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`) - if err != nil { - return "", "", err - } - - matches := regex.FindStringSubmatch(clientName) - if len(matches) < 4 { - return "", "", fmt.Errorf("malformed clientName %s", clientName) - } - - // the first match is the entire string, the second is "module", the third is - // "package.Client" and the fourth is "package". - // if there was no "module/" prefix, the second match will be the empty string - if matches[1] != "" { - return matches[1], matches[2], nil - } - return matches[3], matches[2], nil -} - -// NonRetriableError marks the specified error as non-retriable. -func NonRetriableError(err error) error { - return &nonRetriableError{err} -} - -type nonRetriableError struct { - error -} - -func (p *nonRetriableError) Error() string { - return p.error.Error() +// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. +// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's +// context with an instance of this type. This is to prevent context values from flowing across disjoint +// requests which can have unintended side-effects. +type ContextWithDeniedValues struct { + context.Context } -func (*nonRetriableError) NonRetriable() { - // marker method -} - -func (p *nonRetriableError) Unwrap() error { - return p.error +// Value implements part of the [context.Context] interface. +// It acts as a deny-list for certain context keys. +func (c *ContextWithDeniedValues) Value(key any) any { + switch key.(type) { + case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer: + return nil + default: + return c.Context.Value(key) + } } - -var _ errorinfo.NonRetriable = (*nonRetriableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index f73704cf01..d934f1dc5f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -29,9 +29,11 @@ type Transporter = exported.Transporter type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. -// All zero-value fields will be initialized with default values. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. 
type ClientOptions struct { - // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. APIVersion string // Cloud specifies a cloud for the client. The default is Azure Public Cloud. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index 8a2e6c61ed..cffe692d7e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -10,9 +10,12 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "reflect" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // PagingHandler contains the required data for constructing a Pager. @@ -23,12 +26,16 @@ type PagingHandler[T any] struct { // Fetcher fetches the first and subsequent pages. Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer } // Pager provides operations for iterating over paged responses. type Pager[T any] struct { current *T handler PagingHandler[T] + tracer tracing.Tracer firstPage bool } @@ -37,6 +44,7 @@ type Pager[T any] struct { func NewPager[T any](handler PagingHandler[T]) *Pager[T] { return &Pager[T]{ handler: handler, + tracer: handler.Tracer, firstPage: true, } } @@ -51,8 +59,6 @@ func (p *Pager[T]) More() bool { // NextPage advances the pager to the next page. func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { - var resp T - var err error if p.current != nil { if p.firstPage { // we get here if it's an LRO-pager, we already have the first page @@ -61,12 +67,16 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { } else if !p.handler.More(*p.current) { return *new(T), errors.New("no more pages") } - resp, err = p.handler.Fetcher(ctx, p.current) } else { // non-LRO case, first page p.firstPage = false - resp, err = p.handler.Fetcher(ctx, nil) } + + var err error + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err := p.handler.Fetcher(ctx, p.current) if err != nil { return *new(T), err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go index 9d9288f53d..6b1f5c083e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -13,9 +13,35 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { - AllowedHeaders, AllowedQueryParameters []string - APIVersion APIVersionOptions - PerCall, PerRetry []policy.Policy + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. 
+ AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // Tracing contains options used to configure distributed tracing. + Tracing TracingOptions +} + +// TracingOptions contains tracing options for SDK developers. +type TracingOptions struct { + // Namespace contains the value to use for the az.namespace span attribute. + Namespace string } // Pipeline represents a primitive for sending HTTP requests and receiving responses. @@ -56,8 +82,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy policies = append(policies, NewRetryPolicy(&cp.Retry)) policies = append(policies, plOpts.PerRetry...) policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) policies = append(policies, NewLogPolicy(&cp.Logging)) - policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) transport := cp.Transport if transport == nil { transport = defaultHTTPClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index ff4931cd24..f0f2803559 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -35,7 +35,7 @@ type acquiringResourceState struct { // acquire acquires or updates the resource; only one // thread/goroutine at a time ever calls this function func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { - tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) if err != nil { return exported.AccessToken{}, time.Time{}, err } @@ -73,9 +73,17 @@ func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(p // Do authorizes a request with a bearer token func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { - if strings.ToLower(req.Raw().URL.Scheme) != "https" { - return nil, shared.NonRetriableError(errors.New("bearer token authentication is not permitted for non TLS protected (https) endpoints")) + // skip adding the authorization header if no TokenCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+	if b.cred == nil {
+		return req.Next()
+	}
+
+	if err := checkHTTPSForAuth(req); err != nil {
+		return nil, err
 	}
+
 	var err error
 	if b.authzHandler.OnRequest != nil {
 		err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
@@ -83,7 +91,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
 		err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
 	}
 	if err != nil {
-		return nil, ensureNonRetriable(err)
+		return nil, errorinfo.NonRetriableError(err)
 	}
 
 	res, err := req.Next()
@@ -99,22 +107,15 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
 			}
 		}
 	}
-	return res, ensureNonRetriable(err)
-}
-
-func ensureNonRetriable(err error) error {
-	var nre errorinfo.NonRetriable
-	if err != nil && !errors.As(err, &nre) {
-		err = btpError{err}
+	if err != nil {
+		err = errorinfo.NonRetriableError(err)
 	}
-	return err
+	return res, err
 }
 
-// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize
-type btpError struct {
-	error
+func checkHTTPSForAuth(req *policy.Request) error {
+	if strings.ToLower(req.Raw().URL.Scheme) != "https" {
+		return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
+	}
+	return nil
 }
-
-func (btpError) NonRetriable() {}
-
-var _ errorinfo.NonRetriable = (*btpError)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
new file mode 100644
index 0000000000..3df1c12189
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
@@ -0,0 +1,143 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
+)
+
+const (
+	attrHTTPMethod     = "http.method"
+	attrHTTPURL        = "http.url"
+	attrHTTPUserAgent  = "http.user_agent"
+	attrHTTPStatusCode = "http.status_code"
+
+	attrAZClientReqID  = "az.client_request_id"
+	attrAZServiceReqID = "az.service_request_id"
+
+	attrNetPeerName = "net.peer.name"
+)
+
+// newHTTPTracePolicy creates a new instance of the httpTracePolicy.
+//   - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace
+func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy {
+	return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)}
+}
+
+// httpTracePolicy is a policy that creates a trace for the HTTP request and its response
+type httpTracePolicy struct {
+	allowedQP map[string]struct{}
+}
+
+// Do implements the pipeline.Policy interface for the httpTracePolicy type.
+func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + var urlErr *url.Error + if errors.As(err, &urlErr) { + // calling *url.Error.Error() will include the unsanitized URL + // which we don't want. in addition, we already have the HTTP verb + // and sanitized URL in the trace so we aren't losing any info + err = urlErr.Err + } + span.SetStatus(tracing.SpanStatusError, err.Error()) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // for future expansion +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method") +// - tracer is the client's Tracer for creating spans +// - options contains optional values. pass nil to accept any default values +func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { + if !tracer.Enabled() { + return ctx, func(err error) {} + } + + // we MUST propagate the active tracer before returning so that the trace policy can access it + ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) + + const newSpanKind = tracing.SpanKindInternal + if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { + // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), + // then the span for Bar() must be suppressed. however, if Bar() makes a REST + // call, then Bar's HTTP span must be a child of Foo's span. + // however, there is an exception to this rule. if the SDK method Foo() is a + // messaging producer/consumer, and it takes a callback that's a SDK method + // Bar(), then the span for Bar() must _not_ be suppressed. 
+	if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal {
+		return ctx, func(err error) {}
+	}
+	}
+	ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
+		Kind: newSpanKind,
+	})
+	ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
+	return ctx, func(err error) {
+		if err != nil {
+			errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
+			span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error()))
+		}
+		span.End()
+	}
+}
+
+// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress.
+type ctxActiveSpan struct{}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
index 2e47a5bad0..6f577fa7a9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
@@ -40,10 +40,18 @@ func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options
 
 // Do implements the Do method on the [policy.Policy] interface.
 func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
-	val := exported.KeyCredentialGet(k.cred)
-	if k.prefix != "" {
-		val = k.prefix + val
+	// skip adding the authorization header if no KeyCredential was provided.
+	// this prevents a panic that might be hard to diagnose and allows testing
+	// against http endpoints that don't require authentication.
+	if k.cred != nil {
+		if err := checkHTTPSForAuth(req); err != nil {
+			return nil, err
+		}
+		val := exported.KeyCredentialGet(k.cred)
+		if k.prefix != "" {
+			val = k.prefix + val
+		}
+		req.Raw().Header.Add(k.header, val)
 	}
-	req.Raw().Header.Add(k.header, val)
 	return req.Next()
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
index 8514f57d5c..f048d7fb53 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
@@ -191,7 +191,8 @@ func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) {
 	}
 	sort.Strings(keys)
 	for _, k := range keys {
-		value := header.Get(k)
+		// don't use Get() as it will canonicalize k which might cause a mismatch
+		value := header[k][0]
 		// redact all header values not in the allow-list
 		if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok {
 			value = redactedValue
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index 21fcb39682..04d7bb4ecb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -59,15 +59,7 @@ func setDefaults(o *policy.RetryOptions) {
 }
 
 func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
-	pow := func(number int64, exponent int32) int64 { // pow is nested helper function
-		var result int64 = 1
-		for n := int32(0); n < exponent; n++ {
-			result *= number
-		}
-		return result
-	}
-
-	delay := time.Duration(pow(2, try)-1) * o.RetryDelay
+	delay := time.Duration((1<<try)-1) * o.RetryDelay
+	if i := strings.LastIndex(mod, "/"); i > -1 {
+		mod = mod[i+1:]
+	}
 	b.WriteString(formatTelemetry(mod, ver))
 	b.WriteRune(' ')
b.WriteString(platformInfo) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 3d029a3d15..c373f68962 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -13,6 +13,8 @@ import ( "flag" "fmt" "net/http" + "reflect" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -20,9 +22,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" ) @@ -54,6 +58,9 @@ type NewPollerOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPoller creates a Poller based on the provided initial response. @@ -70,6 +77,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: options.Handler, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -83,7 +91,9 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol // determine the polling method var opr PollingHandler[T] var err error - if async.Applicable(resp) { + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { // async poller must be checked first as it can also have a location header opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { @@ -110,6 +120,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: opr, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -121,6 +132,9 @@ type NewPollerFromResumeTokenOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPollerFromResumeToken creates a Poller from a resume token string. @@ -147,7 +161,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options opr := options.Handler // now rehydrate the poller based on the encoded poller type - if opr != nil { + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) } else if async.CanResume(asJSON) { opr, _ = async.New[T](pl, nil, "") @@ -166,6 +182,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options return &Poller[T]{ op: opr, result: result, + tracer: options.Tracer, }, nil } @@ -188,6 +205,7 @@ type Poller[T any] struct { resp *http.Response err error result *T + tracer tracing.Tracer done bool } @@ -203,7 +221,7 @@ type PollUntilDoneOptions struct { // options: pass nil to accept the default values. // NOTE: the default polling frequency is 30 seconds which works well for most operations. 
However, some operations might // benefit from a shorter or longer duration. -func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { if options == nil { options = &PollUntilDoneOptions{} } @@ -212,9 +230,13 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt cp.Frequency = 30 * time.Second } + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + // skip the floor check when executing tests so they don't take so long if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { - return *new(T), errors.New("polling frequency minimum is one second") + err = errors.New("polling frequency minimum is one second") + return } start := time.Now() @@ -226,22 +248,24 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // initial check for a retry-after header existing on the initial response if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) - if err := shared.Delay(ctx, retryAfter); err != nil { + if err = shared.Delay(ctx, retryAfter); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } // begin polling the endpoint until a terminal state is reached for { - resp, err := p.Poll(ctx) + var resp *http.Response + resp, err = p.Poll(ctx) if err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } if p.Done() { logPollUntilDoneExit("succeeded") - return p.Result(ctx) + res, err = p.Result(ctx) + return } d := cp.Frequency if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { @@ -252,7 +276,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } if err = shared.Delay(ctx, d); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } @@ -261,17 +285,22 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // If Poll succeeds, the poller's state is updated and the HTTP response is returned. // If Poll fails, the poller's state is unmodified and the error is returned. // Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { if p.Done() { // the LRO has reached a terminal state, don't poll again - return p.resp, nil + resp = p.resp + return } - resp, err := p.op.Poll(ctx) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) if err != nil { - return nil, err + return } p.resp = resp - return p.resp, nil + return } // Done returns true if the LRO has reached a terminal state. @@ -284,31 +313,40 @@ func (p *Poller[T]) Done() bool { // If the LRO completed successfully, a populated instance of T is returned. // If the LRO failed or was canceled, an *azcore.ResponseError error is returned. // Calling this on an LRO in a non-terminal state will return an error. 
-func (p *Poller[T]) Result(ctx context.Context) (T, error) {
+func (p *Poller[T]) Result(ctx context.Context) (res T, err error) {
 	if !p.Done() {
-		return *new(T), errors.New("poller is in a non-terminal state")
+		err = errors.New("poller is in a non-terminal state")
+		return
 	}
 	if p.done {
 		// the result has already been retrieved, return the cached value
 		if p.err != nil {
-			return *new(T), p.err
+			err = p.err
+			return
 		}
-		return *p.result, nil
+		res = *p.result
+		return
 	}
-	err := p.op.Result(ctx, p.result)
+
+	ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
+	defer func() { endSpan(err) }()
+
+	err = p.op.Result(ctx, p.result)
 	var respErr *exported.ResponseError
 	if errors.As(err, &respErr) {
 		// the LRO failed.  record the error
 		p.err = err
 	} else if err != nil {
 		// the call to Result failed, don't cache anything in this case
-		return *new(T), err
+		return
 	}
 	p.done = true
 	if p.err != nil {
-		return *new(T), p.err
+		err = p.err
+		return
 	}
-	return *p.result, nil
+	res = *p.result
+	return
 }
 
 // ResumeToken returns a value representing the poller that can be used to resume
@@ -325,3 +363,22 @@ func (p *Poller[T]) ResumeToken() (string, error) {
 	}
 	return tk, err
 }
+
+// extracts the type name from the string returned from reflect.Value.Name()
+func shortenTypeName(s string) string {
+	// the value is formatted as follows
+	// Poller[module/Package.Type].Method
+	// we want to shorten the generic type parameter string to Type
+	// anything we don't recognize will be left as-is
+	begin := strings.Index(s, "[")
+	end := strings.Index(s, "]")
+	if begin == -1 || end == -1 {
+		return s
+	}
+
+	typeName := s[begin+1 : end]
+	if i := strings.LastIndex(typeName, "."); i > -1 {
+		typeName = typeName[i+1:]
+	}
+	return s[:begin+1] + typeName + s[end:]
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index b7e6fb26f9..e97223da29 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -9,18 +9,14 @@ package runtime
 import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"io"
 	"mime/multipart"
 	"net/url"
-	"os"
 	"path"
-	"reflect"
 	"strings"
-	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
@@ -29,14 +25,14 @@ import (
 
 // Base64Encoding is used to specify which base-64 encoder/decoder to use when
 // encoding/decoding a slice of bytes to/from a string.
-type Base64Encoding int
+type Base64Encoding = exported.Base64Encoding
 
 const (
 	// Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
-	Base64StdFormat Base64Encoding = 0
+	Base64StdFormat Base64Encoding = exported.Base64StdFormat
 
 	// Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
-	Base64URLFormat Base64Encoding = 1
+	Base64URLFormat Base64Encoding = exported.Base64URLFormat
 )
 
 // NewRequest creates a new policy.Request with the specified input.
@@ -93,10 +89,7 @@ func JoinPaths(root string, paths ...string) string {
 
 // EncodeByteArray will base-64 encode the byte slice v.
func EncodeByteArray(v []byte, format Base64Encoding) string { - if format == Base64URLFormat { - return base64.RawURLEncoding.EncodeToString(v) - } - return base64.StdEncoding.EncodeToString(v) + return exported.EncodeByteArray(v, format) } // MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. @@ -109,9 +102,6 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. func MarshalAsJSON(req *policy.Request, v interface{}) error { - if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { - v = cloneWithoutReadOnlyFields(v) - } b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -183,80 +173,5 @@ func SkipBodyDownload(req *policy.Request) { req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) } -// returns a clone of the object graph pointed to by v, omitting values of all read-only -// fields. if there are no read-only fields in the object graph, no clone is created. -func cloneWithoutReadOnlyFields(v interface{}) interface{} { - val := reflect.Indirect(reflect.ValueOf(v)) - if val.Kind() != reflect.Struct { - // not a struct, skip - return v - } - // first walk the graph to find any R/O fields. - // if there aren't any, skip cloning the graph. - if !recursiveFindReadOnlyField(val) { - return v - } - return recursiveCloneWithoutReadOnlyFields(val) -} - -// returns true if any field in the object graph of val contains the `azure:"ro"` tag value -func recursiveFindReadOnlyField(val reflect.Value) bool { - t := val.Type() - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - return true - } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { - return true - } - } - return false -} - -// clones the object graph of val. all non-R/O properties are copied to the clone -func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { - t := val.Type() - clone := reflect.New(t) - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - // omit from payload - continue - } - // clone field will receive the same value as the source field... - value := val.Field(i) - v := reflect.Indirect(value) - if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { - // ...unless the source value is a struct, in which case we recurse to clone that struct. - // (We can't recursively clone time.Time because it contains unexported fields.) - c := recursiveCloneWithoutReadOnlyFields(v) - if field.Anonymous { - // NOTE: this does not handle the case of embedded fields of unexported struct types. 
- // this should be ok as we don't generate any code like this at present - value = reflect.Indirect(reflect.ValueOf(c)) - } else { - value = reflect.ValueOf(c) - } - } - reflect.Indirect(clone).Field(i).Set(value) - } - return clone.Interface() -} - -// returns true if the "azure" tag contains the option "ro" -func azureTagIsReadOnly(tag string) bool { - if tag == "" { - return false - } - parts := strings.Split(tag, ",") - for _, part := range parts { - if part == "ro" { - return true - } - } - return false -} +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index d1f58e9e29..003c875b1f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -8,13 +8,13 @@ package runtime import ( "bytes" - "encoding/base64" "encoding/json" "encoding/xml" "fmt" "io" "net/http" + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -105,31 +105,5 @@ func removeBOM(resp *http.Response) error { // DecodeByteArray will base-64 decode the provided string into v. func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { - if len(s) == 0 { - return nil - } - payload := string(s) - if payload[0] == '"' { - // remove surrounding quotes - payload = payload[1 : len(payload)-1] - } - switch format { - case Base64StdFormat: - decoded, err := base64.StdEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - case Base64URLFormat: - // use raw encoding as URL format should not contain any '=' characters - decoded, err := base64.RawURLEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - default: - return fmt.Errorf("unrecognized byte array format: %d", format) - } + return azexported.DecodeByteArray(s, v, format) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go index 589d09f2c0..2124c1d48b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -11,6 +11,8 @@ import ( "net" "net/http" "time" + + "golang.org/x/net/http2" ) var defaultHTTPClient *http.Client @@ -24,6 +26,7 @@ func init() { }), ForceAttemptHTTP2: true, MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, @@ -32,6 +35,13 @@ func init() { Renegotiation: tls.RenegotiateFreelyAsClient, }, } + // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed + if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { + // if the connection has been idle for 10 seconds, send a ping frame for a health check + http2Transport.ReadIdleTimeout = 10 * time.Second + // if there's no response to the ping within the timeout, the connection will be closed + http2Transport.PingTimeout = 5 * time.Second + } defaultHTTPClient = &http.Client{ Transport: defaultTransport, } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go index 75f757cedd..1ade7c560f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -31,12 +31,12 @@ type Provider struct { newTracerFn func(name, version string) Tracer } -// NewTracer creates a new Tracer for the specified name and version. -// - name - the name of the tracer object, typically the fully qualified name of the service client -// - version - the version of the module in which the service client resides -func (p Provider) NewTracer(name, version string) (tracer Tracer) { +// NewTracer creates a new Tracer for the specified module name and version. +// - module - the fully qualified name of the module +// - version - the version of the module +func (p Provider) NewTracer(module, version string) (tracer Tracer) { if p.newTracerFn != nil { - tracer = p.newTracerFn(name, version) + tracer = p.newTracerFn(module, version) } return } @@ -45,21 +45,28 @@ func (p Provider) NewTracer(name, version string) (tracer Tracer) { // TracerOptions contains the optional values when creating a Tracer. type TracerOptions struct { - // for future expansion + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) Span } // NewTracer creates a Tracer with the specified values. // - newSpanFn is the underlying implementation for creating Span instances // - options contains optional values; pass nil to accept the default value func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + if options == nil { + options = &TracerOptions{} + } return Tracer{ - newSpanFn: newSpanFn, + newSpanFn: newSpanFn, + spanFromContextFn: options.SpanFromContext, } } // Tracer is the factory that creates Span instances. type Tracer struct { - newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + attrs []Attribute + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + spanFromContextFn func(ctx context.Context) Span } // Start creates a new span and a context.Context that contains it. @@ -68,11 +75,37 @@ type Tracer struct { // - options contains optional values for the span, pass nil to accept any defaults func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { if t.newSpanFn != nil { - return t.newSpanFn(ctx, spanName, options) + opts := SpanOptions{} + if options != nil { + opts = *options + } + opts.Attributes = append(opts.Attributes, t.attrs...) + return t.newSpanFn(ctx, spanName, &opts) } return ctx, Span{} } +// SetAttributes sets attrs to be applied to each Span. If a key from attrs +// already exists for an attribute of the Span it will be overwritten with +// the value contained in attrs. +func (t *Tracer) SetAttributes(attrs ...Attribute) { + t.attrs = append(t.attrs, attrs...) +} + +// Enabled returns true if this Tracer is capable of creating Spans. +func (t Tracer) Enabled() bool { + return t.newSpanFn != nil +} + +// SpanFromContext returns the Span associated with the current context. +// If the provided context has no Span, false is returned. 
+func (t Tracer) SpanFromContext(ctx context.Context) Span { + if t.spanFromContextFn != nil { + return t.spanFromContextFn(ctx) + } + return Span{} +} + // SpanOptions contains optional settings for creating a span. type SpanOptions struct { // Kind indicates the kind of Span. @@ -97,9 +130,6 @@ type SpanImpl struct { // AddEvent contains the implementation for the Span.AddEvent method. AddEvent func(string, ...Attribute) - // AddError contains the implementation for the Span.AddError method. - AddError func(err error) - // SetStatus contains the implementation for the Span.SetStatus method. SetStatus func(SpanStatus, string) } @@ -140,13 +170,6 @@ func (s Span) AddEvent(name string, attrs ...Attribute) { } } -// AddError adds the specified error event to the span. -func (s Span) AddError(err error) { - if s.impl.AddError != nil { - s.impl.AddError(err) - } -} - // SetStatus sets the status on the span along with a description. func (s Span) SetStatus(code SpanStatus, desc string) { if s.impl.SetStatus != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go index ade7b348e3..8ee66b5267 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -14,3 +14,33 @@ type NonRetriable interface { error NonRetriable() } + +// NonRetriableError marks the specified error as non-retriable. +// This function takes an error as input and returns a new error that is marked as non-retriable. +func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +// nonRetriableError is a struct that embeds the error interface. +// It is used to represent errors that should not be retried. +type nonRetriableError struct { + error +} + +// Error method for nonRetriableError struct. +// It returns the error message of the embedded error. +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +// NonRetriable is a marker method for nonRetriableError struct. +// Non-functional and indicates that the error is non-retriable. +func (*nonRetriableError) NonRetriable() { + // marker method +} + +// Unwrap method for nonRetriableError struct. +// It returns the original error that was marked as non-retriable. +func (p *nonRetriableError) Unwrap() error { + return p.error +} diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5 new file mode 100644 index 0000000000..897864b852 --- /dev/null +++ b/vendor/github.com/alecthomas/units/renovate.json5 @@ -0,0 +1,11 @@ +{ + $schema: "https://docs.renovatebot.com/renovate-schema.json", + extends: [ + "config:recommended", + ":semanticCommits", + ":semanticCommitTypeAll(chore)", + ":semanticCommitScope(deps)", + "group:allNonMajor", + "schedule:earlyMondays", // Run once a week. 
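To make the expanded tracing surface concrete, here is a sketch of how a caller could wire up the Tracer as extended above; the printf-based span function is purely illustrative and stands in for a real OpenTelemetry bridge:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

func main() {
	// Illustrative newSpanFn: a real implementation would create a span in
	// the tracing backend and return a context carrying it.
	newSpan := func(ctx context.Context, name string, opts *tracing.SpanOptions) (context.Context, tracing.Span) {
		fmt.Printf("span %q started with %d attribute(s)\n", name, len(opts.Attributes))
		return ctx, tracing.Span{}
	}

	tracer := tracing.NewTracer(newSpan, &tracing.TracerOptions{
		// The new option added in this change.
		SpanFromContext: func(context.Context) tracing.Span { return tracing.Span{} },
	})

	// SetAttributes (new) stamps these onto every span the tracer starts.
	tracer.SetAttributes(tracing.Attribute{Key: "module", Value: "example"})

	if tracer.Enabled() { // Enabled (new) reports whether a newSpanFn was set
		ctx, span := tracer.Start(context.Background(), "ExampleClient.Do", nil)
		defer span.End()
		_ = tracer.SpanFromContext(ctx) // the new accessor
	}
}
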
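The NonRetriableError helper added above lives in an internal module, so application code cannot import it directly, but the marker-interface pattern is trivial to replicate. A self-contained sketch under that assumption (all names here are illustrative, not the SDK's):

package main

import (
	"errors"
	"fmt"
)

// NonRetriable mirrors the SDK's marker interface: errors implementing it
// should not be retried.
type NonRetriable interface {
	error
	NonRetriable()
}

type nonRetriableError struct{ error }

func (nonRetriableError) NonRetriable()   {}
func (e nonRetriableError) Unwrap() error { return e.error }

// markNonRetriable wraps err so retry loops can detect it via errors.As.
func markNonRetriable(err error) error { return nonRetriableError{err} }

func main() {
	err := fmt.Errorf("fetch credentials: %w", markNonRetriable(errors.New("401 Unauthorized")))

	var nr NonRetriable
	if errors.As(err, &nr) {
		fmt.Println("giving up immediately:", nr)
	}
}
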
+ ], +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 785f30d8e6..329f788a38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -31,6 +31,8 @@ package endpointcreds import ( "encoding/json" + "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -69,7 +71,37 @@ type Provider struct { // Optional authorization token value if set will be used as the value of // the Authorization header of the endpoint credential request. + // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // everytime a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token everytime credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken func retrieves auth token according to TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error req := p.Client.NewRequest(op, nil, out) req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { + + authToken := p.AuthorizationToken + var err error + if p.AuthorizationTokenProvider != nil { + authToken, err = p.AuthorizationTokenProvider.GetToken() + if err != nil { + return nil, fmt.Errorf("get authorization token: %v", err) + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return nil, fmt.Errorf("authorization token contains invalid newline sequence") + } + if len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index e39903284d..1ba80b5760 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,6 +9,7 @@ package defaults import ( "fmt" + "io/ioutil" "net" "net/http" "net/url" @@ -115,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro const ( httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ) +// direct representation of the IPv4 address for the ECS container +// 
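A sketch of how the new AuthorizationTokenProvider hook might be used; the endpoint and token path are hypothetical, and defaults.Config()/defaults.Handlers() simply supply a usable client configuration:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Hypothetical local credentials endpoint; in a container the SDK
	// derives this from AWS_CONTAINER_CREDENTIALS_FULL_URI.
	const endpoint = "http://169.254.170.23/v1/credentials"

	creds := endpointcreds.NewCredentialsClient(
		*defaults.Config(), defaults.Handlers(), endpoint,
		func(p *endpointcreds.Provider) {
			// Re-read the token file on every retrieval so a rotated
			// token is picked up without restarting the process.
			p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
				b, err := os.ReadFile("/var/run/secrets/token") // hypothetical path
				return string(b), err
			})
		},
	)

	if _, err := creds.Get(); err != nil {
		fmt.Println("retrieve failed (expected outside a container):", err)
	}
}
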
"169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + // RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { @@ -135,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P var lookupHostFn = net.LookupHost -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil } - // Host is not an ip, perform lookup addrs, err := lookupHostFn(host) if err != nil { return false, err } + for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { return false, nil } } @@ -155,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) { return true, nil } +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string @@ -165,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) host := aws.URLHostname(parsed) if len(host) == 0 { errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } else if parsed.Scheme == "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host) + } } } @@ -190,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + if contents, err := ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } else { + return string(contents), nil + } + }) + } }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 5f97b40a61..ee1ea9b6a6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -755,6 +755,13 @@ var awsPartition = partition{ }, }, }, + "agreement-marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1040,6 +1047,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -1818,6 +1840,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -1827,12 +1855,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3145,6 +3188,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3157,6 +3203,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3947,6 +3999,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -3971,6 +4029,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + 
}, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -3986,15 +4089,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + }, }, }, "autoscaling-plans": service{ @@ -4492,6 +4619,14 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-eu-central-1", + }: endpoint{ + Hostname: "bedrock.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4524,6 +4659,14 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4572,6 +4715,9 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -6165,15 +6311,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6195,6 +6353,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6249,6 +6410,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6944,6 +7111,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -6960,6 +7135,22 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -6976,6 +7167,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -6992,6 +7191,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -7016,6 +7223,22 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "compute-optimizer.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "compute-optimizer.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -7396,6 +7619,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7524,6 +7750,18 @@ var awsPartition = partition{ }, }, }, + "cost-optimization-hub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "cur": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -11312,6 +11550,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11525,6 +11769,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -11534,6 +11781,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11606,6 +11856,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11652,6 +11905,9 @@ var awsPartition = partition{ }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -11661,6 +11917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -11670,6 +11929,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11685,6 +11947,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: 
endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12321,12 +12586,27 @@ var awsPartition = partition{ }, "finspace": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -13428,16 +13708,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "gamesparks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "geo": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15698,12 +15968,45 @@ var awsPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "api-ap-southeast-1", }: endpoint{ @@ -15752,6 +16055,30 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "data-ap-southeast-1", }: endpoint{ @@ -17777,6 +18104,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18070,6 +18400,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18118,6 +18451,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -18534,46 +18870,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "macie": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - }, - }, - }, "macie2": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -19082,6 +19378,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -19134,6 +19433,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -19186,6 +19488,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -21130,12 +21435,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22181,6 +22495,161 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "qbusiness.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "qbusiness.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "qbusiness.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "qbusiness.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "qbusiness.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: 
"qbusiness.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "qbusiness.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "qbusiness.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "qbusiness.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "qbusiness.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "qbusiness.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "qbusiness.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "qbusiness.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "qbusiness.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "qbusiness.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "qbusiness.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "qbusiness.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "qbusiness.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "qbusiness.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "qbusiness.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "qbusiness.us-west-2.api.aws", + }, + }, + }, "qldb": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23605,6 +24074,16 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -23670,6 +24149,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.eu-north-1.api.aws", }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -23690,6 +24174,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.il-central-1.api.aws", }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -26012,6 +26501,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -26021,15 +26513,24 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -26407,6 +26908,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27108,6 +27612,38 @@ var awsPartition = partition{ }, Deprecated: 
boxedTrue, }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -27150,6 +27686,166 @@ var awsPartition = partition{ }: endpoint{ Hostname: "signer-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: "verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "simspaceweaver": service{ @@ -29625,6 +30321,31 @@ var awsPartition = partition{ }, }, }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "tnb": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30321,6 +31042,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -32257,6 +32981,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -33252,9 +33979,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "emr-containers": service{ @@ -33841,6 +34580,31 @@ var awscnPartition = partition{ }, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: 
"qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33881,6 +34645,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "resource-explorer-2": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34182,6 +34953,22 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: "verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sms": service{ @@ -34287,9 +35074,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "storagegateway": service{ @@ -35063,12 +35862,42 @@ var awsusgovPartition = partition{ }, "appconfigdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -35203,6 +36032,16 @@ var awsusgovPartition = partition{ }, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36323,6 +37162,46 @@ var awsusgovPartition = partition{ }, }, }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "ds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36778,6 +37657,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -36789,6 +37674,13 @@ var awsusgovPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -37198,21 +38090,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, }, }, "greengrass": service{ @@ -38047,21 +38963,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + }, }, }, "lambda": service{ @@ -38792,6 +39732,31 @@ var awsusgovPartition = partition{ }, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, 
+ }, + }, "quicksight": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39033,6 +39998,46 @@ var awsusgovPartition = partition{ }, }, }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "resource-explorer-2": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -39830,20 +40835,40 @@ var awsusgovPartition = partition{ "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", }, }, }, @@ -40062,6 +41087,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -40070,6 +41113,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "states": service{ @@ -40979,6 +42040,28 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -41101,6 +42184,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ec2": service{ @@ -41492,22 +42578,136 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-iso-east-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-iso-west-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "resource-groups": service{ @@ -41792,6 +42992,13 @@ var awsisobPartition = 
partition{ }, }, }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -41830,6 +43037,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42213,16 +43427,73 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-isob-east-1", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "resource-groups": service{ @@ -42253,6 +43524,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d6fa24776c..93bb5de647 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -171,6 +171,12 @@ type envConfig struct { // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
// @@ -251,6 +257,9 @@ var ( ec2IMDSEndpointModeEnvKey = []string{ "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", } + ec2MetadataV1DisabledEnvKey = []string{ + "AWS_EC2_METADATA_V1_DISABLED", + } useCABundleKey = []string{ "AWS_CA_BUNDLE", } @@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { return envConfig{}, err } + setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey) if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { return cfg, err @@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) { } } +func setBoolPtrFromEnvVal(dst **bool, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch { + case strings.EqualFold(value, "false"): + *dst = new(bool) + **dst = false + case strings.EqualFold(value, "true"): + *dst = new(bool) + **dst = true + } + } +} + func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { for _, k := range keys { value := os.Getenv(k) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 8127c99a9a..3c88dee526 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -779,6 +779,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } + cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback + if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled) + } + if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled) + } + cfg.S3UseARNRegion = userCfg.S3UseARNRegion if cfg.S3UseARNRegion == nil { cfg.S3UseARNRegion = &envCfg.S3UseARNRegion diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 8f1388f9fb..f3ce8183dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -80,6 +80,9 @@ const ( // EC2 IMDS Endpoint ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + // ECS IMDSv1 disable fallback + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + // Use DualStack Endpoint Resolution useDualStackEndpoint = "use_dualstack_endpoint" @@ -179,6 +182,12 @@ type sharedConfig struct { // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
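Restating setBoolPtrFromEnvVal above as a self-contained sketch: the result is deliberately tri-state, staying nil when the variable is unset or unrecognized so the caller can fall back to other configuration sources:

package main

import (
	"fmt"
	"os"
	"strings"
)

// boolPtrFromEnv returns nil unless one of keys is set to a recognized
// "true"/"false" value (case-insensitive), mirroring the vendored helper.
func boolPtrFromEnv(keys ...string) *bool {
	var dst *bool
	for _, k := range keys {
		v := os.Getenv(k)
		switch {
		case strings.EqualFold(v, "true"):
			t := true
			dst = &t
		case strings.EqualFold(v, "false"):
			f := false
			dst = &f
		}
	}
	return dst
}

func main() {
	os.Setenv("AWS_EC2_METADATA_V1_DISABLED", "true")
	if d := boolPtrFromEnv("AWS_EC2_METADATA_V1_DISABLED"); d != nil {
		// The session layer then derives EC2MetadataEnableFallback = !*d.
		fmt.Println("IMDSv1 fallback enabled:", !*d)
	}
}
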
// @@ -434,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index b209793744..b542df9315 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d03e561703..f63b61e4c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.25" +const SDKVersion = "1.48.14" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 58b2ecfbee..2cc389eaf2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -3417,6 +3417,10 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -6085,6 +6089,10 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -6346,6 +6354,10 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -14218,9 +14230,7 @@ type ExportDescription struct { // Point in time from which table data was exported. 
ExportTime *time.Time `type:"timestamp"` - // Choice of whether to execute as a full export or incremental export. Valid - // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, - // the IncrementalExportSpecification must also be used. + // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. ExportType *string `type:"string" enum:"ExportType"` // Status code for the result of the failed export. @@ -14487,9 +14497,7 @@ type ExportSummary struct { // FAILED. ExportStatus *string `type:"string" enum:"ExportStatus"` - // Choice of whether to execute as a full export or incremental export. Valid - // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, - // the IncrementalExportSpecification must also be used. + // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. ExportType *string `type:"string" enum:"ExportType"` } @@ -14555,8 +14563,9 @@ type ExportTableToPointInTimeInput struct { ExportTime *time.Time `type:"timestamp"` // Choice of whether to execute as a full export or incremental export. Valid - // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, - // the IncrementalExportSpecification must also be used. + // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. + // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must + // also be used. ExportType *string `type:"string" enum:"ExportType"` // Optional object containing the parameters specific to an incremental export. @@ -16592,8 +16601,8 @@ type IncrementalExportSpecification struct { // this is not provided, the latest time with data available will be used. ExportToTime *time.Time `type:"timestamp"` - // Choice of whether to output the previous item image prior to the start time - // of the incremental export. Valid values are NEW_AND_OLD_IMAGES and NEW_IMAGES. + // The view type that was chosen for the export. Valid values are NEW_AND_OLD_IMAGES + // and NEW_IMAGES. The default value is NEW_AND_OLD_IMAGES. ExportViewType *string `type:"string" enum:"ExportViewType"` } @@ -22336,7 +22345,7 @@ type ScanInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression) // in the Amazon DynamoDB Developer Guide. FilterExpression *string `type:"string"` @@ -24550,6 +24559,10 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. 
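// The reworded export documentation above implies the following call shape:
// INCREMENTAL_EXPORT must carry an IncrementalExportSpecification, and
// ExportViewType falls back to NEW_AND_OLD_IMAGES. A hedged sketch; the
// table ARN and bucket are placeholders, and ExportFromTime is assumed to
// mark the start of the incremental window.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
		TableArn:   aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example"), // placeholder
		S3Bucket:   aws.String("example-export-bucket"),                                 // placeholder
		ExportType: aws.String(dynamodb.ExportTypeIncrementalExport),
		IncrementalExportSpecification: &dynamodb.IncrementalExportSpecification{
			ExportFromTime: aws.Time(time.Now().Add(-24 * time.Hour)),
			ExportViewType: aws.String(dynamodb.ExportViewTypeNewAndOldImages),
		},
	})
	if err != nil {
		fmt.Println("export failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.ExportDescription.ExportArn))
}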
+// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 79b80e693a..d03749e0c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -239,6 +239,10 @@ const ( // // * There is a user error, such as an invalid data format. // + // * There is an ongoing TransactWriteItems operation that conflicts with + // a concurrent TransactWriteItems request. In this case the TransactWriteItems + // operation fails with a TransactionCanceledException. + // // DynamoDB cancels a TransactGetItems request under the following circumstances: // // * There is an ongoing TransactGetItems operation that conflicts with a diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 3f6e837bad..2ff147ccf4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -279,6 +279,10 @@ func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req * // exceeds the limit. To add more filter polices, submit an Amazon SNS Limit // Increase case in the Amazon Web Services Support Center. // +// - ErrCodeReplayLimitExceededException "ReplayLimitExceeded" +// Indicates that the request parameter has exceeded the maximum number of concurrent +// message replays. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ConfirmSubscription func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { req, out := c.ConfirmSubscriptionRequest(input) @@ -1085,6 +1089,9 @@ func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, // - ErrCodeInvalidParameterException "InvalidParameter" // Indicates that a request parameter does not comply with the associated constraints. // +// - ErrCodeInvalidStateException "InvalidState" +// Indicates that the specified state is not a valid state for an event source. +// // - ErrCodeInternalErrorException "InternalError" // Indicates an internal service error. // @@ -3272,13 +3279,13 @@ func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output // Indicates that the user has been denied access to the requested resource. // // - ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) +// The request was rejected because the specified Amazon Web Services KMS key // isn't enabled. // // - ErrCodeKMSInvalidStateException "KMSInvalidState" // The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// valid for this request. For more information, see Key states of Amazon Web +// Services KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the Key Management Service Developer Guide. // // - ErrCodeKMSNotFoundException "KMSNotFound" @@ -3444,13 +3451,13 @@ func (c *SNS) PublishBatchRequest(input *PublishBatchInput) (req *request.Reques // The batch request contains more entries than permissible. 
// // - ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) +// The request was rejected because the specified Amazon Web Services KMS key // isn't enabled. // // - ErrCodeKMSInvalidStateException "KMSInvalidState" // The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// valid for this request. For more information, see Key states of Amazon Web +// Services KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the Key Management Service Developer Guide. // // - ErrCodeKMSNotFoundException "KMSNotFound" @@ -4032,6 +4039,10 @@ func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesI // exceeds the limit. To add more filter polices, submit an Amazon SNS Limit // Increase case in the Amazon Web Services Support Center. // +// - ErrCodeReplayLimitExceededException "ReplayLimitExceeded" +// Indicates that the request parameter has exceeded the maximum number of concurrent +// message replays. +// // - ErrCodeInternalErrorException "InternalError" // Indicates an internal service error. // @@ -4209,7 +4220,7 @@ func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, out // to confirm the subscription. // // You call the ConfirmSubscription action with the token from the subscription -// response. Confirmation tokens are valid for three days. +// response. Confirmation tokens are valid for two days. // // This action is throttled at 100 transactions per second (TPS). // @@ -4230,6 +4241,10 @@ func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, out // exceeds the limit. To add more filter polices, submit an Amazon SNS Limit // Increase case in the Amazon Web Services Support Center. // +// - ErrCodeReplayLimitExceededException "ReplayLimitExceeded" +// Indicates that the request parameter has exceeded the maximum number of concurrent +// message replays. +// // - ErrCodeInvalidParameterException "InvalidParameter" // Indicates that a request parameter does not comply with the associated constraints. // @@ -5423,7 +5438,13 @@ type CreateTopicInput struct { // // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): // - // * FifoTopic – When this is set to true, a FIFO topic is created. + // * ArchivePolicy – Adds or updates an inline policy document to archive + // messages stored in the specified Amazon SNS topic. + // + // * BeginningArchiveTime – The earliest starting point at which a message + // in the topic’s archive can be replayed from. This point in time is based + // on the configured message retention period set by the topic’s message + // archiving policy. // // * ContentBasedDeduplication – Enables content-based deduplication for // FIFO topics. By default, ContentBasedDeduplication is set to false. If @@ -9154,6 +9175,19 @@ type SubscribeInput struct { // more information, see Fanout to Kinesis Data Firehose delivery streams // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) // in the Amazon SNS Developer Guide. 
+ // + // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): + // + // * ReplayPolicy – Adds or updates an inline policy document for a subscription + // to replay messages stored in the specified Amazon SNS topic. + // + // * ReplayStatus – Retrieves the status of the subscription message replay, + // which can be one of the following: Completed – The replay has successfully + // redelivered all messages, and is now delivering newly published messages. + // If an ending point was specified in the ReplayPolicy then the subscription + // will no longer receive newly published messages. In progress – The replay + // is currently replaying the selected messages. Failed – The replay was + // unable to complete. Pending – The default state while the replay initiates. Attributes map[string]*string `type:"map"` // The endpoint that you want to receive notifications. Endpoints vary by protocol: diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go index 979eee05e1..da042a2219 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go @@ -80,6 +80,12 @@ const ( // sign your request using Signature Version 4. ErrCodeInvalidSecurityException = "InvalidSecurity" + // ErrCodeInvalidStateException for service response error code + // "InvalidState". + // + // Indicates that the specified state is not a valid state for an event source. + ErrCodeInvalidStateException = "InvalidState" + // ErrCodeKMSAccessDeniedException for service response error code // "KMSAccessDenied". // @@ -90,7 +96,7 @@ const ( // ErrCodeKMSDisabledException for service response error code // "KMSDisabled". // - // The request was rejected because the specified customer master key (CMK) + // The request was rejected because the specified Amazon Web Services KMS key // isn't enabled. ErrCodeKMSDisabledException = "KMSDisabled" @@ -98,8 +104,8 @@ const ( // "KMSInvalidState". // // The request was rejected because the state of the specified resource isn't - // valid for this request. For more information, see How Key State Affects Use - // of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // valid for this request. For more information, see Key states of Amazon Web + // Services KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the Key Management Service Developer Guide. ErrCodeKMSInvalidStateException = "KMSInvalidState" @@ -144,6 +150,13 @@ const ( // Exception error indicating platform application disabled. ErrCodePlatformApplicationDisabledException = "PlatformApplicationDisabled" + // ErrCodeReplayLimitExceededException for service response error code + // "ReplayLimitExceeded". + // + // Indicates that the request parameter has exceeded the maximum number of concurrent + // message replays. + ErrCodeReplayLimitExceededException = "ReplayLimitExceeded" + // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFound". 
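// The ReplayPolicy/ReplayStatus attributes documented above are set at
// subscription time. A hedged sketch of requesting a replay on a FIFO topic
// subscription; the ARNs are placeholders and the ReplayPolicy document
// schema is defined by SNS, so the value below is illustrative only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.Must(session.NewSession()))

	out, err := svc.Subscribe(&sns.SubscribeInput{
		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example.fifo"), // placeholder
		Protocol: aws.String("sqs"),
		Endpoint: aws.String("arn:aws:sqs:us-east-1:123456789012:example.fifo"), // placeholder
		Attributes: map[string]*string{
			"ReplayPolicy": aws.String(`{}`), // placeholder policy document
		},
	})
	if err != nil {
		// The new ReplayLimitExceeded code surfaces as an awserr.Error.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sns.ErrCodeReplayLimitExceededException {
			fmt.Println("too many concurrent message replays")
		}
		return
	}
	fmt.Println(aws.StringValue(out.SubscriptionArn))
}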
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index c743913c57..04f6c811b6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -56,9 +56,10 @@ func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Requ // CreateToken API operation for AWS SSO OIDC. // -// Creates and returns an access token for the authorized client. The access -// token issued will be used to fetch short-term credentials for the assigned -// roles in the AWS account. +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -133,6 +134,131 @@ func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInpu return out, req.Send() } +const opCreateTokenWithIAM = "CreateTokenWithIAM" + +// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the +// client's request for the CreateTokenWithIAM operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenWithIAMRequest method. +// req, resp := client.CreateTokenWithIAMRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) { + op := &request.Operation{ + Name: opCreateTokenWithIAM, + HTTPMethod: "POST", + HTTPPath: "/token?aws_iam=t", + } + + if input == nil { + input = &CreateTokenWithIAMInput{} + } + + output = &CreateTokenWithIAMOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTokenWithIAM API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients and applications +// that are authenticated using IAM entities. The access token can be used to +// fetch short-term credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateTokenWithIAM for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. 
For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// - InvalidRequestRegionException +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + return out, req.Send() +} + +// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTokenWithIAM for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterClient = "RegisterClient" // RegisterClientRequest generates a "aws/request.Request" representing the @@ -331,8 +457,11 @@ type AccessDeniedException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be access_denied. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -400,8 +529,11 @@ type AuthorizationPendingException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be authorization_pending. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -466,8 +598,8 @@ func (s *AuthorizationPendingException) RequestID() string { type CreateTokenInput struct { _ struct{} `type:"structure"` - // The unique identifier string for each client. This value should come from - // the persisted result of the RegisterClient API. + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClient API. // // ClientId is a required field ClientId *string `locationName:"clientId" type:"string" required:"true"` @@ -475,23 +607,30 @@ type CreateTokenInput struct { // A secret string generated for the client. This value should come from the // persisted result of the RegisterClient API. // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + // // ClientSecret is a required field - ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` - // The authorization code received from the authorization service. This parameter - // is required to perform an authorization grant request to get access to a - // token. + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant + // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` - // Used only when calling this API for the device code grant type. This short-term - // code is used to identify this authentication attempt. This should come from - // an in-memory reference to the result of the StartDeviceAuthorization API. + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the + // result of the StartDeviceAuthorization API. DeviceCode *string `locationName:"deviceCode" type:"string"` - // Supports grant types for the authorization code, refresh token, and device - // code request. For device code requests, specify the following value: + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code // - // urn:ietf:params:oauth:grant-type:device_code + // * Refresh Token - refresh_token // // For information about how to obtain the device code, see the StartDeviceAuthorization // topic. 
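// Per the grant types enumerated above, the Device Code flow chains
// StartDeviceAuthorization into CreateToken. A minimal sketch; the client
// credentials and start URL are placeholders standing in for persisted
// RegisterClient output.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	svc := ssooidc.New(session.Must(session.NewSession()))

	clientID, clientSecret := "client-id", "client-secret" // placeholders

	start, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder
	})
	if err != nil {
		return
	}

	// After the user approves the device, exchange the short-term device code
	// for tokens using the Device Code grant type named above.
	tok, err := svc.CreateToken(&ssooidc.CreateTokenInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		DeviceCode:   start.DeviceCode,
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
	})
	if err != nil {
		return
	}
	fmt.Println(aws.StringValue(tok.TokenType)) // "Bearer" per the updated docs
}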
@@ -499,21 +638,28 @@ type CreateTokenInput struct { // GrantType is a required field GrantType *string `locationName:"grantType" type:"string" required:"true"` - // The location of the application that will receive the authorization code. - // Users authorize the service to send the request to this location. + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. RedirectUri *string `locationName:"redirectUri" type:"string"` - // Currently, refreshToken is not yet implemented and is not supported. For - // more information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. // - // The token used to obtain an access token in the event that the access token - // is invalid or expired. - RefreshToken *string `locationName:"refreshToken" type:"string"` - - // The list of scopes that is defined by the client. Upon authorization, this - // list is used to restrict permissions when granting an access token. + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. Scope []*string `locationName:"scope" type:"list"` } @@ -605,31 +751,43 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // An opaque token to access IAM Identity Center resources assigned to a user. - AccessToken *string `locationName:"accessToken" type:"string"` + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` // Indicates the time in seconds when an access token will expire. ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` - // Currently, idToken is not yet implemented and is not supported. For more - // information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // The idToken is not implemented or supported. 
For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). // - // The identifier of the user that associated with the access token, if present. - IdToken *string `locationName:"idToken" type:"string"` - - // Currently, refreshToken is not yet implemented and is not supported. For - // more information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + // A token that, if present, can be used to refresh a previously issued access // token that might have expired. - RefreshToken *string `locationName:"refreshToken" type:"string"` + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` // Used to notify the client that the returned token is an access token. The - // supported type is BearerToken. + // supported token type is Bearer. TokenType *string `locationName:"tokenType" type:"string"` } @@ -681,14 +839,312 @@ func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { return s } +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. 
Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. + Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
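// A minimal sketch of the new CreateTokenWithIAM operation using the Token
// Exchange grant documented above; the application ARN and subject token are
// placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	svc := ssooidc.New(session.Must(session.NewSession()))

	out, err := svc.CreateTokenWithIAM(&ssooidc.CreateTokenWithIAMInput{
		ClientId:  aws.String("arn:aws:sso::123456789012:application/example"), // placeholder app ARN
		GrantType: aws.String("urn:ietf:params:oauth:grant-type:token-exchange"),
		// An access token issued by IAM Identity Center to another
		// application; placeholder value.
		SubjectToken:       aws.String("subject-access-token"),
		SubjectTokenType:   aws.String("urn:ietf:params:oauth:token-type:access_token"),
		RequestedTokenType: aws.String("urn:ietf:params:oauth:token-type:access_token"),
	})
	if err != nil {
		return
	}
	fmt.Println(aws.StringValue(out.IssuedTokenType))
}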
+func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. +func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. +func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. 
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + // Indicates that the token issued by the service is expired and is no longer // valid. 
type ExpiredTokenException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be expired_token. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -756,8 +1212,11 @@ type InternalServerException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be server_error. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -826,8 +1285,11 @@ type InvalidClientException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_client. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -895,8 +1357,11 @@ type InvalidClientMetadataException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_client_metadata. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -964,8 +1429,11 @@ type InvalidGrantException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_grant. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1033,8 +1501,11 @@ type InvalidRequestException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_request. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1096,13 +1567,95 @@ func (s *InvalidRequestException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Indicates the IAM Identity Center endpoint which the requester may call with + // this token. + Endpoint *string `locationName:"endpoint" type:"string"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + // Indicates the region which the requester may call with this token. + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error { + return &InvalidRequestRegionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestRegionException) Code() string { + return "InvalidRequestRegionException" +} + +// Message returns the exception's message. +func (s *InvalidRequestRegionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestRegionException) OrigErr() error { + return nil +} + +func (s *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestRegionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestRegionException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that the scope provided in the request is invalid. type InvalidScopeException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_scope. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1238,7 +1791,7 @@ func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { type RegisterClientOutput struct { _ struct{} `type:"structure"` - // The endpoint where the client can request authorization. + // An endpoint that the client can use to request authorization. AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` // The unique identifier string for each client. This client uses this identifier @@ -1250,12 +1803,16 @@ type RegisterClientOutput struct { // A secret string generated for the client. The client will use this string // to get authenticated by the service in subsequent calls. - ClientSecret *string `locationName:"clientSecret" type:"string"` + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` // Indicates the time at which the clientId and clientSecret will become invalid. ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` - // The endpoint where the client can get an access token. + // An endpoint that the client can use to create tokens. TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` } @@ -1319,8 +1876,11 @@ type SlowDownException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be slow_down. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1395,11 +1955,15 @@ type StartDeviceAuthorizationInput struct { // A secret string that is generated for the client. This value should come // from the persisted result of the RegisterClient API operation. // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. + // // ClientSecret is a required field - ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` - // The URL for the AWS access portal. For more information, see Using the AWS - // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) // in the IAM Identity Center User Guide. // // StartUrl is a required field @@ -1550,8 +2114,11 @@ type UnauthorizedClientException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be unauthorized_client. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1618,8 +2185,11 @@ type UnsupportedGrantTypeException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be unsupported_grant_type. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go index 8b5ee6019a..083568c616 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -3,15 +3,13 @@ // Package ssooidc provides the client and types for making API // requests to AWS SSO OIDC. // -// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect -// (OIDC) is a web service that enables a client (such as AWS CLI or a native -// application) to register with IAM Identity Center. The service also enables -// the client to fetch the user’s access token upon successful authentication -// and authorization with IAM Identity Center. +// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a +// client (such as CLI or a native application) to register with IAM Identity +// Center. The service also enables the client to fetch the user’s access +// token upon successful authentication and authorization with IAM Identity +// Center. // -// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces -// will continue to retain their original name for backward compatibility purposes. -// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// IAM Identity Center uses the sso and identitystore API namespaces. // // # Considerations for Using This Guide // @@ -22,21 +20,24 @@ // - The IAM Identity Center OIDC service currently implements only the portions // of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628 // (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single -// sign-on authentication with the AWS CLI. Support for other OIDC flows -// frequently needed for native applications, such as Authorization Code -// Flow (+ PKCE), will be addressed in future releases. +// sign-on authentication with the CLI. // -// - The service emits only OIDC access tokens, such that obtaining a new -// token (For example, token refresh) requires explicit user re-authentication. +// - With older versions of the CLI, the service only emits OIDC access tokens, +// so to obtain a new token, users must explicitly re-authenticate. To access +// the OIDC flow that supports token refresh and doesn’t require re-authentication, +// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI +// V2) with support for OIDC token refresh and configurable IAM Identity +// Center session durations. For more information, see Configure Amazon Web +// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html). 
// -// - The access tokens provided by this service grant access to all AWS account -// entitlements assigned to an IAM Identity Center user, not just a particular -// application. +// - The access tokens provided by this service grant access to all Amazon +// Web Services account entitlements assigned to an IAM Identity Center user, +// not just a particular application. // // - The documentation in this guide does not describe the mechanism to convert -// the access token into AWS Auth (“sigv4”) credentials for use with -// IAM-protected AWS service endpoints. For more information, see GetRoleCredentials -// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// the access token into Amazon Web Services Auth (“sigv4”) credentials +// for use with IAM-protected Amazon Web Services service endpoints. For +// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) // in the IAM Identity Center Portal API Reference Guide. // // For general information about IAM Identity Center, see What is IAM Identity diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index 6983770126..e6242e4928 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -64,6 +64,13 @@ const ( // a required parameter might be missing or out of range. ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeInvalidRequestRegionException for service response error code + // "InvalidRequestRegionException". + // + // Indicates that a token provided as input to the request was issued by and + // is only usable by calling IAM Identity Center endpoints in another region. + ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException" + // ErrCodeInvalidScopeException for service response error code // "InvalidScopeException". // @@ -100,6 +107,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, "InvalidRequestException": newErrorInvalidRequestException, + "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, "SlowDownException": newErrorSlowDownException, "UnauthorizedClientException": newErrorUnauthorizedClientException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go index 969f33c37b..782bae3692 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go @@ -51,7 +51,7 @@ const ( func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { c := p.ClientConfig(EndpointsID, cfgs...) 
if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = "awsssooidc" + c.SigningName = "sso-oauth" } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 11af63b4d8..2c395f5f67 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1460,7 +1460,15 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` - // Reserved for future use. + // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []*ProvidedContext `type:"list"` // The Amazon Resource Name (ARN) of the role to assume. @@ -3405,14 +3413,18 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } -// Reserved for future use. +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. type ProvidedContext struct { _ struct{} `type:"structure"` - // Reserved for future use. + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. ContextAssertion *string `min:"4" type:"string"` - // Reserved for future use. + // The context provider ARN from which the trusted context assertion was generated. ProviderArn *string `min:"20" type:"string"` } diff --git a/vendor/github.com/bboreham/go-loser/.gitignore b/vendor/github.com/bboreham/go-loser/.gitignore new file mode 100644 index 0000000000..3b735ec4a8 --- /dev/null +++ b/vendor/github.com/bboreham/go-loser/.gitignore @@ -0,0 +1,21 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/bboreham/go-loser/LICENSE b/vendor/github.com/bboreham/go-loser/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/bboreham/go-loser/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
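The go-loser README and tree.go vendored below expose a small generic API: `New` builds the tournament tree from a slice of `Sequence` values plus a sentinel `maxVal`, and `Next`/`At` then yield the merged output in order. As a rough orientation only, here is a hedged sketch of driving that API; the `sliceSeq` adapter is illustrative and not part of the package.

```go
package main

import (
	"fmt"
	"math"

	"github.com/bboreham/go-loser"
)

// sliceSeq adapts a sorted []int to the loser.Sequence interface. It starts
// positioned before the first element, because New calls Next once on every
// sequence before the first At.
type sliceSeq struct {
	vals []int
	pos  int
}

func (s *sliceSeq) At() int { return s.vals[s.pos] }

func (s *sliceSeq) Next() bool {
	s.pos++
	return s.pos < len(s.vals)
}

func main() {
	seqs := []*sliceSeq{
		{vals: []int{1, 4, 7}, pos: -1},
		{vals: []int{2, 5, 8}, pos: -1},
		{vals: []int{3, 6, 9}, pos: -1},
	}
	// math.MaxInt serves as the sentinel value reported by exhausted sequences.
	tree := loser.New[int, *sliceSeq](seqs, math.MaxInt)
	for tree.Next() {
		fmt.Println(tree.At()) // 1, 2, 3, ... 9 in sorted order
	}
}
```

This is the same k-way-merge pattern Prometheus applies to sorted postings lists, which is the improvement this upgrade pulls in.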
diff --git a/vendor/github.com/bboreham/go-loser/README.md b/vendor/github.com/bboreham/go-loser/README.md new file mode 100644 index 0000000000..9e6cbc3cc5 --- /dev/null +++ b/vendor/github.com/bboreham/go-loser/README.md @@ -0,0 +1,8 @@ +# go-loser +Loser Tree data structure, for fast k-way merge + +I will be speaking about this at [GopherCon](https://www.gophercon.com/agenda/session/1160355) on 27th Sept 2023. + +There are currently two versions of the code on two Git branches: [main](https://github.com/bboreham/go-loser/tree/main), which works for built-in types like `int` and `string`, and [any](https://github.com/bboreham/go-loser/tree/any) which works on any type but requires you to pass in a function pointer to do `less` comparisons. + +See https://en.wikipedia.org/wiki/K-way_merge_algorithm#Tournament_Tree for more details on the algorithm. diff --git a/vendor/github.com/bboreham/go-loser/tree.go b/vendor/github.com/bboreham/go-loser/tree.go new file mode 100644 index 0000000000..ba097abed2 --- /dev/null +++ b/vendor/github.com/bboreham/go-loser/tree.go @@ -0,0 +1,153 @@ +// Loser tree, from https://en.wikipedia.org/wiki/K-way_merge_algorithm#Tournament_Tree + +package loser + +import "golang.org/x/exp/constraints" + +type Value constraints.Ordered + +type Sequence[E Value] interface { + At() E // Returns the current value. + Next() bool // Advances and returns true if there is a value at this new position. +} + +func New[E Value, S Sequence[E]](sequences []S, maxVal E) *Tree[E, S] { + nSequences := len(sequences) + t := Tree[E, S]{ + maxVal: maxVal, + nodes: make([]node[E, S], nSequences*2), + } + for i, s := range sequences { + t.nodes[i+nSequences].items = s + t.moveNext(i + nSequences) // Must call Next on each item so that At() has a value. + } + if nSequences > 0 { + t.nodes[0].index = -1 // flag to be initialized on first call to Next(). + } + return &t +} + +// Call the close function on all sequences that are still open. +func (t *Tree[E, S]) Close() { + for _, e := range t.nodes[len(t.nodes)/2 : len(t.nodes)] { + if e.index == -1 { + continue + } + } +} + +// A loser tree is a binary tree laid out such that nodes N and N+1 have parent N/2. +// We store M leaf nodes in positions M...2M-1, and M-1 internal nodes in positions 1..M-1. +// Node 0 is a special node, containing the winner of the contest. +type Tree[E Value, S Sequence[E]] struct { + maxVal E + nodes []node[E, S] +} + +type node[E Value, S Sequence[E]] struct { + index int // This is the loser for all nodes except the 0th, where it is the winner. + value E // Value copied from the loser node, or winner for node 0. + items S // Only populated for leaf nodes. +} + +func (t *Tree[E, S]) moveNext(index int) bool { + n := &t.nodes[index] + if n.items.Next() { + n.value = n.items.At() + return true + } + n.value = t.maxVal + n.index = -1 + return false +} + +func (t *Tree[E, S]) Winner() S { + return t.nodes[t.nodes[0].index].items +} + +func (t *Tree[E, S]) At() E { + return t.nodes[0].value +} + +func (t *Tree[E, S]) Next() bool { + nodes := t.nodes + if len(nodes) == 0 { + return false + } + if nodes[0].index == -1 { // If tree has not been initialized yet, do that. + t.initialize() + return nodes[nodes[0].index].index != -1 + } + if nodes[nodes[0].index].index == -1 { // already exhausted + return false + } + t.moveNext(nodes[0].index) + t.replayGames(nodes[0].index) + return nodes[nodes[0].index].index != -1 +} + +// Current winner has been advanced independently; fix up the loser tree. 
+func (t *Tree[E, S]) Fix(closed bool) { + nodes := t.nodes + cur := &nodes[nodes[0].index] + if closed { + cur.value = t.maxVal + cur.index = -1 + } else { + cur.value = cur.items.At() + } + t.replayGames(nodes[0].index) +} + +func (t *Tree[E, S]) IsEmpty() bool { + nodes := t.nodes + if nodes[0].index == -1 { // If tree has not been initialized yet, do that. + t.initialize() + } + return nodes[nodes[0].index].index == -1 +} + +func (t *Tree[E, S]) initialize() { + winner := t.playGame(1) + t.nodes[0].index = winner + t.nodes[0].value = t.nodes[winner].value +} + +// Find the winner at position pos; if it is a non-leaf node, store the loser. +// pos must be >= 1 and < len(t.nodes) +func (t *Tree[E, S]) playGame(pos int) int { + nodes := t.nodes + if pos >= len(nodes)/2 { + return pos + } + left := t.playGame(pos * 2) + right := t.playGame(pos*2 + 1) + var loser, winner int + if nodes[left].value < nodes[right].value { + loser, winner = right, left + } else { + loser, winner = left, right + } + nodes[pos].index = loser + nodes[pos].value = nodes[loser].value + return winner +} + +// Starting at pos, which is a winner, re-consider all values up to the root. +func (t *Tree[E, S]) replayGames(pos int) { + nodes := t.nodes + winningValue := nodes[pos].value + for n := parent(pos); n != 0; n = parent(n) { + node := &nodes[n] + if node.value < winningValue { + // Record pos as the loser here, and the old loser is the new winner. + node.index, pos = pos, node.index + node.value, winningValue = winningValue, node.value + } + } + // pos is now the winner; store it in node 0. + nodes[0].index = pos + nodes[0].value = winningValue +} + +func parent(i int) int { return i >> 1 } diff --git a/vendor/github.com/cortexproject/promqlsmith/.go-version b/vendor/github.com/cortexproject/promqlsmith/.go-version index bc4493477a..5fb5a6b4f5 100644 --- a/vendor/github.com/cortexproject/promqlsmith/.go-version +++ b/vendor/github.com/cortexproject/promqlsmith/.go-version @@ -1 +1 @@ -1.19 +1.20 diff --git a/vendor/github.com/cortexproject/promqlsmith/opts.go b/vendor/github.com/cortexproject/promqlsmith/opts.go index 0e85e08915..f7e710e479 100644 --- a/vendor/github.com/cortexproject/promqlsmith/opts.go +++ b/vendor/github.com/cortexproject/promqlsmith/opts.go @@ -64,7 +64,10 @@ func init() { if slices.Contains(f.ArgTypes, parser.ValueTypeString) { continue } - defaultSupportedFuncs = append(defaultSupportedFuncs, f) + // Ignore experimental functions for now. + if !f.Experimental { + defaultSupportedFuncs = append(defaultSupportedFuncs, f) + } } } diff --git a/vendor/github.com/cortexproject/promqlsmith/walk.go b/vendor/github.com/cortexproject/promqlsmith/walk.go index 0757fe835f..4a2db06b73 100644 --- a/vendor/github.com/cortexproject/promqlsmith/walk.go +++ b/vendor/github.com/cortexproject/promqlsmith/walk.go @@ -4,6 +4,7 @@ import ( "fmt" "math/rand" "sort" + "strings" "time" "github.com/prometheus/prometheus/model/labels" @@ -88,9 +89,6 @@ func (s *PromQLSmith) walkAggregateParam(op parser.ItemType) parser.Expr { // or function that returns matrix. 
func (s *PromQLSmith) walkBinaryExpr(valueTypes ...parser.ValueType) parser.Expr { valueTypes = keepValueTypes(valueTypes, vectorAndScalarValueTypes) - if len(valueTypes) == 0 { - valueTypes = vectorAndScalarValueTypes - } expr := &parser.BinaryExpr{ Op: s.walkBinaryOp(!slices.Contains(valueTypes, parser.ValueTypeVector)), VectorMatching: &parser.VectorMatching{ @@ -244,6 +242,7 @@ func (s *PromQLSmith) walkCall(valueTypes ...parser.ValueType) parser.Expr { } } } + sort.Slice(funcs, func(i, j int) bool { return strings.Compare(funcs[i].Name, funcs[j].Name) < 0 }) expr.Func = funcs[s.rnd.Intn(len(funcs))] s.walkFuncArgs(expr) return expr @@ -383,6 +382,7 @@ func exprsFromValueTypes(valueTypes []parser.ValueType) []ExprType { for expr := range set { res = append(res, expr) } + sort.Slice(res, func(i, j int) bool { return res[i] < res[j] }) return res } @@ -396,7 +396,11 @@ func wrapParenExpr(expr parser.Expr) parser.Expr { // keepValueTypes picks value types that we should keep from the input. // input shouldn't contain duplicate value types. +// If no input value types are provided, use value types to keep as result. func keepValueTypes(input []parser.ValueType, keep []parser.ValueType) []parser.ValueType { + if len(input) == 0 { + return keep + } out := make([]parser.ValueType, 0, len(keep)) s := make(map[parser.ValueType]struct{}) for _, vt := range keep { @@ -407,6 +411,7 @@ func keepValueTypes(input []parser.ValueType, keep []parser.ValueType) []parser. out = append(out, vt) } } + sort.Slice(out, func(i, j int) bool { return out[i] < out[j] }) return out } @@ -509,6 +514,9 @@ func getOutputSeries(expr parser.Expr) ([]labels.Labels, bool) { for _, v := range m { output = append(output, v) } + sort.Slice(output, func(i, j int) bool { + return labels.Compare(output[i], output[j]) < 0 + }) return output, false case *parser.SubqueryExpr: return getOutputSeries(node.Expr) diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml deleted file mode 100644 index bfc421200d..0000000000 --- a/vendor/github.com/felixge/httpsnoop/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile index 2d84889aed..4e12afdd90 100644 --- a/vendor/github.com/felixge/httpsnoop/Makefile +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -1,7 +1,7 @@ .PHONY: ci generate clean ci: clean generate - go test -v ./... + go test -race -v ./... generate: go generate . diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md index ddcecd13e7..cf6b42f3d7 100644 --- a/vendor/github.com/felixge/httpsnoop/README.md +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -7,8 +7,8 @@ http.Handlers. Doing this requires non-trivial wrapping of the http.ResponseWriter interface, which is also exposed for users interested in a more low-level API. 
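Among the httpsnoop changes vendored here is a fix so that 1xx informational responses no longer latch as the handler's final status code (see the capture_metrics.go hunk below). For context, a minimal sketch of the package-level `CaptureMetrics` helper; the handler and log format are illustrative.

```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// CaptureMetrics runs the handler and records the final status code,
	// bytes written, and duration. With the fix below, an informational
	// 1xx write no longer masks the real final status.
	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m := httpsnoop.CaptureMetrics(app, w, r)
		log.Printf("%s %s -> code=%d written=%dB duration=%s",
			r.Method, r.URL.Path, m.Code, m.Written, m.Duration)
	})

	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```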
-[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) -[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) ## Usage Example diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go index b77cc7c009..bec7b71b39 100644 --- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri return func(code int) { next(code) - if !headerWritten { + if !(code >= 100 && code <= 199) && !headerWritten { m.Code = code headerWritten = true } diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go index 31cbdfb8ef..101cedde67 100644 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -1,5 +1,5 @@ // +build go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. package httpsnoop diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go index ab99c07c7a..e0951df152 100644 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -1,5 +1,5 @@ // +build !go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. package httpsnoop diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml new file mode 100644 index 0000000000..ffc7b992b3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -0,0 +1,13 @@ +freebsd_task: + name: 'FreeBSD' + freebsd_instance: + image_family: freebsd-13-2 + install_script: + - pkg update -f + - pkg install -y go + test_script: + # run tests as user "cirrus" instead of root + - pw useradd cirrus -m + - chown -R cirrus:cirrus . + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 1d89d85ce4..391cc076b1 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -4,3 +4,4 @@ # Output of go build ./cmd/fsnotify /fsnotify +/fsnotify.exe diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index 77f9593bd5..e0e5757549 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,16 +1,87 @@ # Changelog -All notable changes to this project will be documented in this file. +Unreleased +---------- +Nothing yet. 
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +1.7.0 - 2023-10-22 +------------------ +This version of fsnotify needs Go 1.17. -## [Unreleased] +### Additions -Nothing yet. +- illumos: add FEN backend to support illumos and Solaris. ([#371]) + +- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful + in cases where you can't control the kernel buffer and receive a large number + of events in bursts. ([#550], [#572]) + +- all: add `AddWith()`, which is identical to `Add()` but allows passing + options. ([#521]) + +- windows: allow setting the ReadDirectoryChangesW() buffer size with + `fsnotify.WithBufferSize()`; the default of 64K is the highest value that + works on all platforms and is enough for most purposes, but in some cases a + highest buffer is needed. ([#521]) + +### Changes and fixes + +- inotify: remove watcher if a watched path is renamed ([#518]) + + After a rename the reported name wasn't updated, or even an empty string. + Inotify doesn't provide any good facilities to update it, so just remove the + watcher. This is already how it worked on kqueue and FEN. + + On Windows this does work, and remains working. + +- windows: don't listen for file attribute changes ([#520]) + + File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, + with no way to see if they're a file write or attribute change, so would show + up as a fsnotify.Write event. This is never useful, and could result in many + spurious Write events. + +- windows: return `ErrEventOverflow` if the buffer is full ([#525]) + + Before it would merely return "short read", making it hard to detect this + error. + +- kqueue: make sure events for all files are delivered properly when removing a + watched directory ([#526]) + + Previously they would get sent with `""` (empty string) or `"."` as the path + name. + +- kqueue: don't emit spurious Create events for symbolic links ([#524]) + + The link would get resolved but kqueue would "forget" it already saw the link + itself, resulting on a Create for every Write event for the directory. + +- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) + +- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in + `backend_other.go`, making it easier to use on unsupported platforms such as + WASM, AIX, etc. ([#528]) + +- other: use the `backend_other.go` no-op if the `appengine` build tag is set; + Google AppEngine forbids usage of the unsafe package so the inotify backend + won't compile there. -## [1.6.0] - 2022-10-13 +[#371]: https://github.com/fsnotify/fsnotify/pull/371 +[#516]: https://github.com/fsnotify/fsnotify/pull/516 +[#518]: https://github.com/fsnotify/fsnotify/pull/518 +[#520]: https://github.com/fsnotify/fsnotify/pull/520 +[#521]: https://github.com/fsnotify/fsnotify/pull/521 +[#524]: https://github.com/fsnotify/fsnotify/pull/524 +[#525]: https://github.com/fsnotify/fsnotify/pull/525 +[#526]: https://github.com/fsnotify/fsnotify/pull/526 +[#528]: https://github.com/fsnotify/fsnotify/pull/528 +[#537]: https://github.com/fsnotify/fsnotify/pull/537 +[#550]: https://github.com/fsnotify/fsnotify/pull/550 +[#572]: https://github.com/fsnotify/fsnotify/pull/572 +1.6.0 - 2022-10-13 +------------------ This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, but not documented). It also increases the minimum Linux version to 2.6.32. 
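The 1.7.0 changelog above adds `NewBufferedWatcher()` and `AddWith()`. As a minimal sketch of the basic watch loop against that API (the watched path and buffer size are illustrative; a size of 0 behaves like the plain `NewWatcher()`):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Buffer events in userspace for bursty workloads where the kernel
	// buffers can't be raised; an unbuffered watcher is otherwise preferred.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Read Events and Errors from one goroutine using select.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```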
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index d4e6080feb..e480733d16 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,29 +1,31 @@ fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, and BSD systems. +Windows, Linux, macOS, BSD, and illumos. -Go 1.16 or newer is required; the full documentation is at +Go 1.17 or newer is required; the full documentation is at https://pkg.go.dev/github.com/fsnotify/fsnotify -**It's best to read the documentation at pkg.go.dev, as it's pinned to the last -released version, whereas this README is for the last development version which -may include additions/changes.** - --- Platform support: -| Adapter | OS | Status | -| --------------------- | ---------------| -------------------------------------------------------------| -| inotify | Linux 2.6.32+ | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and macOS should include Android and iOS, but these are currently untested. +| Backend | OS | Status | +| :-------------------- | :--------- | :------------------------------------------------------------------------ | +| inotify | Linux | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FEN | illumos | Supported | +| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | +| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | +| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | +| USN Journals | Windows | [Needs support in x/sys/windows][usn] | +| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | + +Linux and illumos should include Android and Solaris, but these are currently +untested. + +[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 +[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 +[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- @@ -83,20 +85,23 @@ run with: % go run ./cmd/fsnotify +Further detailed documentation can be found in godoc: +https://pkg.go.dev/github.com/fsnotify/fsnotify + FAQ --- ### Will a file still be watched when it's moved to another directory? No, not unless you are watching the location it was moved to. -### Are subdirectories watched too? +### Are subdirectories watched? No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap: [#18]). [#18]: https://github.com/fsnotify/fsnotify/issues/18 ### Do I have to watch the Error and Event channels in a goroutine? -As of now, yes (you can read both channels in the same goroutine using `select`, -you don't need a separate goroutine for both channels; see the example). +Yes. You can read both channels in the same goroutine using `select` (you don't +need a separate goroutine for both channels; see the example). 
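Per the changelog above, the Windows `ReadDirectoryChangesW()` buffer can now be sized per watch. A hedged sketch, assuming `WithBufferSize` takes a size in bytes as the 1.7.0 docs suggest (it is a no-op on non-Windows platforms; the path is illustrative):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// The default buffer is 64K, the largest size guaranteed to work over
	// SMB; raise it when event bursts would otherwise surface as
	// ErrEventOverflow on Windows.
	if err := w.AddWith(`C:\path\to\dir`, fsnotify.WithBufferSize(256*1024)); err != nil {
		log.Fatal(err)
	}
}
```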
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? fsnotify requires support from underlying OS to work. The current NFS and SMB @@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented. [#9]: https://github.com/fsnotify/fsnotify/issues/9 +### Why do I get many Chmod events? +Some programs may generate a lot of attribute changes; for example Spotlight on +macOS, anti-virus programs, backup applications, and some others are known to do +this. As a rule, it's typically best to ignore Chmod events. They're often not +useful, and tend to cause problems. + +Spotlight indexing on macOS can result in multiple events (see [#15]). A +temporary workaround is to add your folder(s) to the *Spotlight Privacy +settings* until we have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 + +### Watching a file doesn't work well +Watching individual files (rather than directories) is generally not recommended +as many programs (especially editors) update files atomically: it will write to +a temporary file which is then moved to to destination, overwriting the original +(or some variant thereof). The watcher on the original file is now lost, as that +no longer exists. + +The upshot of this is that a power failure or crash won't leave a half-written +file. + +Watch the parent directory and use `Event.Name` to filter out files you're not +interested in. There is an example of this in `cmd/fsnotify/file.go`. + Platform-specific notes ----------------------- ### Linux @@ -151,11 +182,3 @@ these platforms. The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to control the maximum number of open files. - -### macOS -Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary -workaround is to add your folder(s) to the *Spotlight Privacy settings* until we -have a native FSEvents implementation (see [#11]). - -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 1a95ad8e7c..28497f1dd8 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,10 +1,19 @@ //go:build solaris // +build solaris +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" ) // Watcher watches a set of paths, delivering events on a channel. @@ -17,9 +26,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. 
// @@ -33,16 +42,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -58,14 +67,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -92,44 +107,129 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. 
Errors chan error + + mu sync.Mutex + port *unix.EventPort + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]struct{} // Explicitly watched directories + watches map[string]struct{} // Explicitly watched non-directories } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") + return NewBufferedWatcher(0) } -// Close removes all watches and closes the events channel. +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + w := &Watcher{ + Events: make(chan Event, sz), + Errors: make(chan error), + dirs: make(map[string]struct{}), + watches: make(map[string]struct{}), + done: make(chan struct{}), + } + + var err error + w.port, err = unix.NewEventPort() + if err != nil { + return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) + } + + go w.readEvents() + return w, nil +} + +// sendEvent attempts to send an event to the user, returning true if the event +// was put in the channel successfully and false if the watcher has been closed. +func (w *Watcher) sendEvent(name string, op Op) (sent bool) { + select { + case w.Events <- Event{Name: name, Op: op}: + return true + case <-w.done: + return false + } +} + +// sendError attempts to send an error to the user, returning true if the error +// was put in the channel successfully and false if the watcher has been closed. +func (w *Watcher) sendError(err error) (sent bool) { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - return nil + // Take the lock used by associateFile to prevent lingering events from + // being processed after the close + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return nil + } + close(w.done) + return w.port.Close() } // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. 
+// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -139,15 +239,63 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if w.port.PathIsWatched(name) { + return nil + } + + _ = getOptions(opts...) + + // Currently we resolve symlinks that were explicitly requested to be + // watched. Otherwise we would use LStat here. + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Associate all files in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, true, w.associateFile) + if err != nil { + return err + } + + w.mu.Lock() + w.dirs[name] = struct{}{} + w.mu.Unlock() + return nil + } + + err = w.associateFile(name, stat, true) + if err != nil { + return err + } + + w.mu.Lock() + w.watches[name] = struct{}{} + w.mu.Unlock() return nil } @@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + if !w.port.PathIsWatched(name) { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // The user has expressed an intent. Immediately remove this name from + // whichever watch list it might be in. If it's not in there the delete + // doesn't cause harm. + w.mu.Lock() + delete(w.watches, name) + delete(w.dirs, name) + w.mu.Unlock() + + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Remove associations for every file in the directory. 
+ if stat.IsDir() { + err := w.handleDirectory(name, stat, false, w.dissociateFile) + if err != nil { + return err + } + return nil + } + + err = w.port.DissociatePath(name) + if err != nil { + return err + } + return nil } + +// readEvents contains the main loop that runs in a goroutine watching for events. +func (w *Watcher) readEvents() { + // If this function returns, the watcher has been closed and we can close + // these channels + defer func() { + close(w.Errors) + close(w.Events) + }() + + pevents := make([]unix.PortEvent, 8) + for { + count, err := w.port.Get(pevents, 1, nil) + if err != nil && err != unix.ETIME { + // Interrupted system call (count should be 0) ignore and continue + if errors.Is(err, unix.EINTR) && count == 0 { + continue + } + // Get failed because we called w.Close() + if errors.Is(err, unix.EBADF) && w.isClosed() { + return + } + // There was an error not caused by calling w.Close() + if !w.sendError(err) { + return + } + } + + p := pevents[:count] + for _, pevent := range p { + if pevent.Source != unix.PORT_SOURCE_FILE { + // Event from unexpected source received; should never happen. + if !w.sendError(errors.New("Event from unexpected source received")) { + return + } + continue + } + + err = w.handleEvent(&pevent) + if err != nil { + if !w.sendError(err) { + return + } + } + } + } +} + +func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { + files, err := os.ReadDir(path) + if err != nil { + return err + } + + // Handle all children of the directory. + for _, entry := range files { + finfo, err := entry.Info() + if err != nil { + return err + } + err = handler(filepath.Join(path, finfo.Name()), finfo, false) + if err != nil { + return err + } + } + + // And finally handle the directory itself. + return handler(path, stat, follow) +} + +// handleEvent might need to emit more than one fsnotify event if the events +// bitmap matches more than one event type (e.g. the file was both modified and +// had the attributes changed between when the association was created and the +// when event was returned) +func (w *Watcher) handleEvent(event *unix.PortEvent) error { + var ( + events = event.Events + path = event.Path + fmode = event.Cookie.(os.FileMode) + reRegister = true + ) + + w.mu.Lock() + _, watchedDir := w.dirs[path] + _, watchedPath := w.watches[path] + w.mu.Unlock() + isWatched := watchedDir || watchedPath + + if events&unix.FILE_DELETE != 0 { + if !w.sendEvent(path, Remove) { + return nil + } + reRegister = false + } + if events&unix.FILE_RENAME_FROM != 0 { + if !w.sendEvent(path, Rename) { + return nil + } + // Don't keep watching the new file name + reRegister = false + } + if events&unix.FILE_RENAME_TO != 0 { + // We don't report a Rename event for this case, because Rename events + // are interpreted as referring to the _old_ name of the file, and in + // this case the event would refer to the new name of the file. This + // type of rename event is not supported by fsnotify. + + // inotify reports a Remove event in this case, so we simulate this + // here. + if !w.sendEvent(path, Remove) { + return nil + } + // Don't keep watching the file that was removed + reRegister = false + } + + // The file is gone, nothing left to do. 
+ if !reRegister { + if watchedDir { + w.mu.Lock() + delete(w.dirs, path) + w.mu.Unlock() + } + if watchedPath { + w.mu.Lock() + delete(w.watches, path) + w.mu.Unlock() + } + return nil + } + + // If we didn't get a deletion the file still exists and we're going to have + // to watch it again. Let's Stat it now so that we can compare permissions + // and have what we need to continue watching the file + + stat, err := os.Lstat(path) + if err != nil { + // This is unexpected, but we should still emit an event. This happens + // most often on "rm -r" of a subdirectory inside a watched directory We + // get a modify event of something happening inside, but by the time we + // get here, the sudirectory is already gone. Clearly we were watching + // this path but now it is gone. Let's tell the user that it was + // removed. + if !w.sendEvent(path, Remove) { + return nil + } + // Suppress extra write events on removed directories; they are not + // informative and can be confusing. + return nil + } + + // resolve symlinks that were explicitly watched as we would have at Add() + // time. this helps suppress spurious Chmod events on watched symlinks + if isWatched { + stat, err = os.Stat(path) + if err != nil { + // The symlink still exists, but the target is gone. Report the + // Remove similar to above. + if !w.sendEvent(path, Remove) { + return nil + } + // Don't return the error + } + } + + if events&unix.FILE_MODIFIED != 0 { + if fmode.IsDir() { + if watchedDir { + if err := w.updateDirectory(path); err != nil { + return err + } + } else { + if !w.sendEvent(path, Write) { + return nil + } + } + } else { + if !w.sendEvent(path, Write) { + return nil + } + } + } + if events&unix.FILE_ATTRIB != 0 && stat != nil { + // Only send Chmod if perms changed + if stat.Mode().Perm() != fmode.Perm() { + if !w.sendEvent(path, Chmod) { + return nil + } + } + } + + if stat != nil { + // If we get here, it means we've hit an event above that requires us to + // continue watching the file or directory + return w.associateFile(path, stat, isWatched) + } + return nil +} + +func (w *Watcher) updateDirectory(path string) error { + // The directory was modified, so we must find unwatched entities and watch + // them. If something was removed from the directory, nothing will happen, + // as everything else should still be watched. + files, err := os.ReadDir(path) + if err != nil { + return err + } + + for _, entry := range files { + path := filepath.Join(path, entry.Name()) + if w.port.PathIsWatched(path) { + continue + } + + finfo, err := entry.Info() + if err != nil { + return err + } + err = w.associateFile(path, finfo, false) + if err != nil { + if !w.sendError(err) { + return nil + } + } + if !w.sendEvent(path, Create) { + return nil + } + } + return nil +} + +func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { + if w.isClosed() { + return ErrClosed + } + // This is primarily protecting the call to AssociatePath but it is + // important and intentional that the call to PathIsWatched is also + // protected by this mutex. Without this mutex, AssociatePath has been seen + // to error out that the path is already associated. + w.mu.Lock() + defer w.mu.Unlock() + + if w.port.PathIsWatched(path) { + // Remove the old association in favor of this one If we get ENOENT, + // then while the x/sys/unix wrapper still thought that this path was + // associated, the underlying event port did not. This call will have + // cleared up that discrepancy. 
The most likely cause is that the event + // has fired but we haven't processed it yet. + err := w.port.DissociatePath(path) + if err != nil && err != unix.ENOENT { + return err + } + } + // FILE_NOFOLLOW means we watch symlinks themselves rather than their + // targets. + events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW + if follow { + // We *DO* follow symlinks for explicitly watched entries. + events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, + events, + stat.Mode()) +} + +func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { + if !w.port.PathIsWatched(path) { + return nil + } + return w.port.DissociatePath(path) +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)+len(w.dirs)) + for pathname := range w.dirs { + entries = append(entries, pathname) + } + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 54c77fbb0e..921c1c1e40 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,5 +1,8 @@ -//go:build linux -// +build linux +//go:build linux && !appengine +// +build linux,!appengine + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh package fsnotify @@ -26,9 +29,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -42,16 +45,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -67,14 +70,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). 
+// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -101,36 +110,148 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after // calling Fd(). See: https://github.com/golang/go/issues/26439 fd int - mu sync.Mutex // Map access inotifyFile *os.File - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close + watches *watches + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + closeMu sync.Mutex + doneResp chan struct{} // Channel to respond to Close +} + +type ( + watches struct { + mu sync.RWMutex + wd map[uint32]*watch // wd → watch + path map[string]uint32 // pathname → wd + } + watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. 
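// Illustrative sketch; not part of the vendored file. The "dedup example in
// cmd/fsnotify" referenced above is not vendored here, so this is a minimal
// timer-based version of the same idea: editors and compilers emit bursts of
// Write events, so act only after a quiet period. The 100ms window and the
// function name are arbitrary; assumes imports "time" and fsnotify.
func dedupWrites(events <-chan fsnotify.Event, handle func(name string)) {
	timers := map[string]*time.Timer{}
	for ev := range events {
		if !ev.Op.Has(fsnotify.Write) {
			continue
		}
		if t, ok := timers[ev.Name]; ok {
			t.Reset(100 * time.Millisecond) // another write: restart the quiet window
			continue
		}
		name := ev.Name
		timers[name] = time.AfterFunc(100*time.Millisecond, func() { handle(name) })
	}
}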
+ } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[uint32]*watch), + path: make(map[string]uint32), + } +} + +func (w *watches) len() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.wd) +} + +func (w *watches) add(ww *watch) { + w.mu.Lock() + defer w.mu.Unlock() + w.wd[ww.wd] = ww + w.path[ww.path] = ww.wd +} + +func (w *watches) remove(wd uint32) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.path, w.wd[wd].path) + delete(w.wd, wd) +} + +func (w *watches) removePath(path string) (uint32, bool) { + w.mu.Lock() + defer w.mu.Unlock() + + wd, ok := w.path[path] + if !ok { + return 0, false + } + + delete(w.path, path) + delete(w.wd, wd) + + return wd, true +} + +func (w *watches) byPath(path string) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[w.path[path]] +} + +func (w *watches) byWd(wd uint32) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[wd] +} + +func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { + w.mu.Lock() + defer w.mu.Unlock() + + var existing *watch + wd, ok := w.path[path] + if ok { + existing = w.wd[wd] + } + + upd, err := f(existing) + if err != nil { + return err + } + if upd != nil { + w.wd[upd.wd] = upd + w.path[upd.path] = upd.wd + + if upd.wd != wd { + delete(w.wd, wd) + } + } + + return nil } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - // Create inotify fd - // Need to set the FD to nonblocking mode in order for SetDeadline methods to work - // Otherwise, blocking i/o operations won't terminate on close + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + // Need to set nonblocking mode for SetDeadline to work, otherwise blocking + // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) if fd == -1 { return nil, errno @@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) { w := &Watcher{ fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), + watches: newWatches(), + Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), @@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool { case w.Events <- e: return true case <-w.done: + return false } - return false } // Returns true if the error was sent, or false if watcher is closed. @@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - w.mu.Lock() + w.closeMu.Lock() if w.isClosed() { - w.mu.Unlock() + w.closeMu.Unlock() return nil } - - // Send 'close' signal to goroutine, and set the Watcher to closed. close(w.done) - w.mu.Unlock() + w.closeMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. 
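// Illustrative usage; not part of the vendored file. NewBufferedWatcher
// (defined above) trades memory for headroom when events arrive faster than
// the consumer drains them; as its doc comment notes, an unbuffered watcher
// plus larger kernel buffers is usually preferable. 4096 is an arbitrary
// example size.
w, err := fsnotify.NewBufferedWatcher(4096)
if err != nil {
	log.Fatal(err)
}
defer w.Close()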
@@ -207,17 +325,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -227,44 +349,59 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { if w.isClosed() { - return errors.New("inotify instance already closed") + return ErrClosed } + name = filepath.Clean(name) + _ = getOptions(opts...) 
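// Illustrative sketch; not part of the vendored file. This is the pattern the
// doc comment above recommends: watch the parent directory and filter on
// Event.Name, so an editor's atomic save-via-rename doesn't silently drop the
// watch. "/etc" and "hosts" are arbitrary example names; assumes imports
// "log", "path/filepath", and fsnotify.
if err := w.Add("/etc"); err != nil { // instead of w.Add("/etc/hosts")
	log.Fatal(err)
}
for ev := range w.Events {
	if filepath.Base(ev.Name) != "hosts" {
		continue // some other file in the directory; ignore
	}
	log.Println("hosts changed:", ev.Op)
}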
+ var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } + return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + if existing != nil { + flags |= existing.flags | unix.IN_MASK_ADD + } - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } + wd, err := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return nil, err + } - return nil + if existing == nil { + return &watch{ + wd: uint32(wd), + path: name, + flags: flags, + }, nil + } + + existing.wd = uint32(wd) + existing.flags = flags + return existing, nil + }) } // Remove stops monitoring the path for changes. @@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] + if w.isClosed() { + return nil + } + return w.remove(filepath.Clean(name)) +} - // Remove it from inotify. +func (w *Watcher) remove(name string) error { + wd, ok := w.watches.removePath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + success, errno := unix.InotifyRmWatch(w.fd, wd) if success == -1 { // TODO: Perhaps it's not helpful to return an error here in every case; // The only two possible errors are: @@ -312,28 +439,28 @@ func (w *Watcher) Remove(name string) error { // are watching is deleted. return errno } - return nil } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. 
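// Illustrative sketch; not part of the vendored file. Because Remove wraps
// ErrNonExistentWatch with %w (see above), callers can match it with
// errors.Is and treat "was never watched" as non-fatal.
if err := w.Remove(path); err != nil {
	if errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Printf("%s was not watched; nothing to remove", path)
	} else {
		log.Fatal(err)
	}
}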
func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() + if w.isClosed() { + return nil + } - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { + entries := make([]string, 0, w.watches.len()) + w.watches.mu.RLock() + for pathname := range w.watches.path { entries = append(entries, pathname) } + w.watches.mu.RUnlock() return entries } -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel func (w *Watcher) readEvents() { @@ -367,14 +494,11 @@ func (w *Watcher) readEvents() { if n < unix.SizeofInotifyEvent { var err error if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF + err = io.EOF // If EOF is received. This should really never happen. } else if n < 0 { - // If an error occurred while reading. - err = errno + err = errno // If an error occurred while reading. } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") + err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -403,18 +527,29 @@ func (w *Watcher) readEvents() { // doesn't append the filename to the event, but we would like to always fill the // the "Name" field with a valid filename. We retrieve the path of the watch from // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) + watch := w.watches.byWd(uint32(raw.Wd)) + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch.wd) + } + // We can't really update the state when a watched path is moved; + // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove + // the watch. 
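// Illustrative sketch; not part of the vendored file. WatchList snapshots the
// explicitly added paths, which makes it possible to tear a watcher down and
// rebuild it (e.g. after reconfiguration). Note it returns nil once Close has
// been called, so take the snapshot first.
paths := w.WatchList()
w.Close()

w2, err := fsnotify.NewWatcher()
if err != nil {
	log.Fatal(err)
}
for _, p := range paths {
	if err := w2.Add(p); err != nil {
		log.Printf("re-adding %s: %v", p, err)
	}
}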
+ if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return + } + } } - w.mu.Unlock() + var name string + if watch != nil { + name = watch.path + } if nameLen > 0 { // Point "bytes" at the first byte of the filename bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 29087469bf..063a0915a0 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,12 +1,14 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -24,9 +26,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -40,16 +42,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -65,14 +67,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. 
// @@ -99,18 +107,27 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error done chan struct{} @@ -133,6 +150,18 @@ type pathInfo struct { // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err @@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) { paths: make(map[int]pathInfo), fileExists: make(map[string]struct{}), userWatches: make(map[string]struct{}), - Events: make(chan Event), + Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), } @@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool { case w.Events <- e: return true case <-w.done: + return false } - return false } // Returns true if the error was sent, or false if watcher is closed. @@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool { case w.Errors <- err: return true case <-w.done: + return false } - return false } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { w.mu.Lock() if w.isClosed { @@ -239,17 +268,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. 
// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -259,15 +292,28 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + _ = getOptions(opts...) + w.mu.Lock() w.userWatches[name] = struct{}{} w.mu.Unlock() @@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { + return w.remove(name, true) +} + +func (w *Watcher) remove(name string, unwatchFiles bool) error { name = filepath.Clean(name) w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } watchfd, ok := w.watches[name] w.mu.Unlock() if !ok { @@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error { w.mu.Unlock() // Find all watched paths that are in this directory that are not external. 
- if isDir { + if unwatchFiles && isDir { var pathsToRemove []string w.mu.Lock() for fd := range w.watchesByDir[name] { @@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error { } w.mu.Unlock() for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. + // Since these are internal, not much sense in propagating error to + // the user, as that will just confuse them with an error about a + // path they did not explicitly watch themselves. w.Remove(name) } } - return nil } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { w.mu.Lock() defer w.mu.Unlock() + if w.isClosed { + return nil + } entries := make([]string, 0, len(w.userWatches)) for pathname := range w.userWatches { @@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string { // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +// addWatch adds name to the watched file set; the flags are interpreted as +// described in kevent(2). +// +// Returns the real path to the file which was added, with symlinks resolved. func (w *Watcher) addWatch(name string, flags uint32) (string, error) { var isDir bool - // Make ./name and name equivalent name = filepath.Clean(name) w.mu.Lock() if w.isClosed { w.mu.Unlock() - return "", errors.New("kevent instance already closed") + return "", ErrClosed } watchfd, alreadyWatching := w.watches[name] // We already have a watch, but we can still override flags. @@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow Symlinks - // - // Linux can add unresolvable symlinks to the watch list without issue, - // and Windows can't do symlinks period. To maintain consistency, we - // will act like everything is fine if the link can't be resolved. - // There will simply be no file events for broken symlinks. Hence the - // returns of nil on errors. + // Follow Symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) + link, err := os.Readlink(name) if err != nil { + // Return nil because Linux can add unresolvable symlinks to the + // watch list without problems, so maintain consistency with + // that. There will be no file events for broken symlinks. + // TODO: more specific check; returns os.PathError; ENOENT? return "", nil } w.mu.Lock() - _, alreadyWatching = w.watches[name] + _, alreadyWatching = w.watches[link] w.mu.Unlock() if alreadyWatching { - return name, nil + // Add to watches so we don't get spurious Create events later + // on when we diff the directories. + w.watches[name] = 0 + w.fileExists[name] = struct{}{} + return link, nil } + name = link fi, err = os.Lstat(name) if err != nil { return "", nil @@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { } // Retry on EINTR; open() can return EINTR in practice on macOS. 
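// Illustrative sketch; not part of the vendored file. This is the
// retry-on-EINTR pattern used above: a signal can interrupt open(2) before it
// completes, in which case the only correct response is to retry. The wrapper
// name and O_RDONLY mode are arbitrary; assumes imports "errors" and
// golang.org/x/sys/unix.
func openRetry(name string) (int, error) {
	for {
		fd, err := unix.Open(name, unix.O_RDONLY, 0)
		if err == nil {
			return fd, nil
		}
		if !errors.Is(err, unix.EINTR) {
			return -1, err
		}
		// Interrupted by a signal before open(2) completed; just retry.
	}
}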
- // See #354, and go issues 11180 and 39237. + // See #354, and Go issues 11180 and 39237. for { watchfd, err = unix.Open(name, openMode, 0) if err == nil { @@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { w.watchesByDir[parentName] = watchesByDir } watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} w.mu.Unlock() } if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) w.mu.Lock() watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && @@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Event values that it sends down the Events channel. func (w *Watcher) readEvents() { defer func() { - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - unix.Close(w.closepipe[0]) close(w.Events) close(w.Errors) + _ = unix.Close(w.kq) + unix.Close(w.closepipe[0]) }() eventBuffer := make([]unix.Kevent_t, 10) @@ -513,18 +573,8 @@ func (w *Watcher) readEvents() { event := w.newEvent(path.name, mask) - if path.isDir && !event.Has(Remove) { - // Double check to make sure the directory exists. This can - // happen when we do a rm -fr on a recursively watched folders - // and we receive a modification event first but the folder has - // been deleted and later receive the delete event. - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - event.Op |= Remove - } - } - if event.Has(Rename) || event.Has(Remove) { - w.Remove(event.Name) + w.remove(event.Name, false) w.mu.Lock() delete(w.fileExists, event.Name) w.mu.Unlock() @@ -540,26 +590,30 @@ func (w *Watcher) readEvents() { } if event.Has(Remove) { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. + // Look for a file that may have overwritten this; for example, + // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) w.mu.Lock() _, found := w.watches[fileDir] w.mu.Unlock() if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) + err := w.sendDirectoryChangeEvents(fileDir) + if err != nil { + if !w.sendError(err) { + closed = true + } } } } else { filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) + if fi, err := os.Lstat(filePath); err == nil { + err := w.sendFileCreatedEventIfNew(filePath, fi) + if err != nil { + if !w.sendError(err) { + closed = true + } + } } } } @@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { e.Op |= Chmod } + // No point sending a write and delete event at the same time: if it's gone, + // then it's gone. 
+ if e.Op.Has(Write) && e.Op.Has(Remove) { + e.Op &^= Write + } return e } // watchDirectoryFiles to mimic inotify when adding a watch on a directory func (w *Watcher) watchDirectoryFiles(dirPath string) error { // Get all files - files, err := ioutil.ReadDir(dirPath) + files, err := os.ReadDir(dirPath) if err != nil { return err } - for _, fileInfo := range files { - path := filepath.Join(dirPath, fileInfo.Name()) + for _, f := range files { + path := filepath.Join(dirPath, f.Name()) + + fi, err := f.Info() + if err != nil { + return fmt.Errorf("%q: %w", path, err) + } - cleanPath, err := w.internalWatch(path, fileInfo) + cleanPath, err := w.internalWatch(path, fi) if err != nil { // No permission to read the file; that's not a problem: just skip. // But do add it to w.fileExists to prevent it from being picked up @@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): cleanPath = filepath.Clean(path) default: - return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + return fmt.Errorf("%q: %w", path, err) } } @@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) { - // Get all files - files, err := ioutil.ReadDir(dir) +func (w *Watcher) sendDirectoryChangeEvents(dir string) error { + files, err := os.ReadDir(dir) if err != nil { - if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { - return + // Directory no longer exists: we can ignore this safely. kqueue will + // still give us the correct events. + if errors.Is(err, os.ErrNotExist) { + return nil } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } - // Search for new files - for _, fi := range files { - err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + for _, f := range files { + fi, err := f.Info() if err != nil { - return + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + } + + err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + // Don't need to send an error if this file isn't readable. + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + return nil + } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } } + return nil } // sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
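// Illustrative sketch; not part of the vendored file. An Event.Op can carry
// several bits at once (the masking above only guarantees Write and Remove
// are never set together), so consumers should test with Op.Has rather than
// comparing with ==.
switch {
case ev.Op.Has(fsnotify.Remove) || ev.Op.Has(fsnotify.Rename):
	// ev.Name is the old path; stop tracking it.
case ev.Op.Has(fsnotify.Write):
	// Content changed, possibly one write out of a burst.
case ev.Op.Has(fsnotify.Chmod):
	// Attributes changed; often safe to ignore.
}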
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { w.mu.Lock() _, doesExist := w.fileExists[filePath] w.mu.Unlock() @@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf } // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) + filePath, err = w.internalWatch(filePath, fi) if err != nil { return err } @@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf return nil } -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory +func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + // mimic Linux providing delete events for subdirectories, but preserve + // the flags used if currently watching subdirectory w.mu.Lock() flags := w.dirFlags[name] w.mu.Unlock() diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index a9bb1c3c4d..d34a23c015 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,39 +1,169 @@ -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows +//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) +// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh package fsnotify -import ( - "fmt" - "runtime" -) +import "errors" -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error +} // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) + return nil, errors.New("fsnotify not supported on the current platform") } -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return nil } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return nil } // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -43,17 +173,26 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. 
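// Illustrative sketch; not part of the vendored file. Per the Errors doc
// comment above, ErrEventOverflow means events were dropped (inotify queue
// overflow, or a too-small Windows buffer), so incremental state can no
// longer be trusted. This is the Errors arm of a consumer select loop;
// rescan() is a hypothetical application function that rebuilds state from a
// fresh directory scan.
case err, ok := <-w.Errors:
	if !ok {
		return
	}
	if errors.Is(err, fsnotify.ErrEventOverflow) {
		rescan() // hypothetical: resynchronize by re-reading the watched tree
		continue
	}
	log.Println("watch error:", err)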
-func (w *Watcher) Add(name string) error { - return nil -} +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return nil } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } // Remove stops monitoring the path for changes. // @@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. -func (w *Watcher) Remove(name string) error { - return nil -} +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index ae392867c0..9bc91e5d61 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,6 +1,13 @@ //go:build windows // +build windows +// Windows backend based on ReadDirectoryChangesW() +// +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw +// +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( @@ -27,9 +34,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -43,16 +50,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -68,14 +75,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. 
// -// # macOS notes +// # Windows notes // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -102,31 +115,52 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel quit chan chan<- error - mu sync.Mutex // Protects access to watches, isClosed - watches watchMap // Map of watches (key: i-number) - isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Protects access to watches, closed + watches watchMap // Map of watches (key: i-number) + closed bool // Set to true when Close() is first called } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(50) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. 
+func NewBufferedWatcher(sz uint) (*Watcher, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) { port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, 50), + Events: make(chan Event, sz), Errors: make(chan error), quit: make(chan chan<- error, 1), } @@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) { return w, nil } +func (w *Watcher) isClosed() bool { + w.mu.Lock() + defer w.mu.Unlock() + return w.closed +} + func (w *Watcher) sendEvent(name string, mask uint64) bool { if mask == 0 { return false @@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool { return false } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() + if w.isClosed() { return nil } - w.isClosed = true + + w.mu.Lock() + w.closed = true w.mu.Unlock() // Send "quit" message to the reader goroutine @@ -188,17 +228,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -208,27 +252,41 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return errors.New("watcher already closed") +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. 
+// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + + with := getOptions(opts...) + if with.bufsize < 4096 { + return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } - w.mu.Unlock() in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + bufsize: with.bufsize, } w.input <- in if err := w.wakeupReader(); err != nil { @@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + in := &input{ op: opRemoveWatch, path: filepath.Clean(name), @@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + w.mu.Lock() defer w.mu.Unlock() @@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string { // This should all be removed at some point, and just use windows.FILE_NOTIFY_* const ( sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 sysFSCREATE = 0x100 sysFSDELETE = 0x200 sysFSDELETESELF = 0x400 @@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { e.Op |= Rename } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } return e } @@ -321,10 +388,11 @@ const ( ) type input struct { - op int - path string - flags uint32 - reply chan error + op int + path string + flags uint32 + bufsize int + reply chan error } type inode struct { @@ -334,13 +402,14 @@ type inode struct { } type watch struct { - ov windows.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [65536]byte // 64K buffer + ov windows.Overlapped + ino *inode // i-number + recurse bool // Recursive watch? 
+ path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf []byte // buffer, allocated later } type ( @@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { +func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { + //pathname, recurse := recursivePath(pathname) + recurse := false + dir, err := w.getDir(pathname) if err != nil { return err @@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error { return os.NewSyscallError("CreateIoCompletionPort", err) } watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), + ino: ino, + path: dir, + names: make(map[string]uint64), + recurse: recurse, + buf: make([]byte, bufsize), } w.mu.Lock() w.watches.set(ino, watchEntry) @@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error { // Must run within the I/O thread. func (w *Watcher) remWatch(pathname string) error { + pathname, recurse := recursivePath(pathname) + dir, err := w.getDir(pathname) if err != nil { return err @@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error { watch := w.watches.get(ino) w.mu.Unlock() + if recurse && !watch.recurse { + return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname) + } + err = windows.CloseHandle(ino.handle) if err != nil { w.sendError(os.NewSyscallError("CloseHandle", err)) @@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error { return nil } - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + // We need to pass the array, rather than the slice. + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, + (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), + watch.recurse, mask, nil, &watch.ov, 0) if rdErr != nil { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { @@ -563,9 +646,8 @@ func (w *Watcher) readEvents() { runtime.LockOSThread() for { + // This error is handled after the watch == nil check below. qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - // This error is handled after the watch == nil check below. NOTE: this - // seems odd, note sure if it's correct. 
watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { @@ -595,7 +677,7 @@ func (w *Watcher) readEvents() { case in := <-w.input: switch in.op { case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) + in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) case opRemoveWatch: in.reply <- w.remWatch(in.path) } @@ -605,6 +687,8 @@ } switch qErr { + case nil: + // No error case windows.ERROR_MORE_DATA: if watch == nil { w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) @@ -626,13 +710,12 @@ default: w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) continue - case nil: } var offset uint32 for { if n == 0 { - w.sendError(errors.New("short read in readEvents()")) + w.sendError(ErrEventOverflow) break } @@ -703,8 +786,9 @@ // Error! if offset >= n { + //lint:ignore ST1005 Windows should be capitalized w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed.")) + "Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE } - if mask&sysFSATTRIB != 0 { - m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES - } if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME } diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 30a5bf0f07..24c99cc499 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,13 +1,18 @@ -//go:build !plan9 -// +build !plan9 - // Package fsnotify provides a cross-platform interface for file system // notifications. +// +// Currently supported systems: +// +// Linux 2.6.32+ via inotify +// BSD, macOS via kqueue +// Windows via ReadDirectoryChangesW +// illumos via FEN package fsnotify import ( "errors" "fmt" + "path/filepath" "strings" ) @@ -33,34 +38,52 @@ type Op uint32 // The operations fsnotify can trigger; see the documentation on [Watcher] for a // full description, and check them with [Event.Has]. const ( + // A new pathname was created. Create Op = 1 << iota + + // The pathname was written to; this does *not* mean the write has finished, + // and a write can be followed by more writes. Write + + // The path was removed; any watches on it will be removed. Some "remove" + // operations may trigger a Rename if the file is actually moved (for + // example "remove to trash" is often a rename). Remove + + // The path was renamed to something else; any watches on it will be + // removed. Rename + + // File attributes were changed. + // + // It's generally not recommended to take action on this event, as it may + // get triggered very frequently by some software. For example, Spotlight + // indexing on macOS, anti-virus software, backup software, etc. Chmod ) -// Common errors that can be reported by a watcher +// Common errors that can be reported.
var ( - ErrNonExistentWatch = errors.New("can't remove non-existent watcher") - ErrEventOverflow = errors.New("fsnotify queue overflow") + ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + ErrClosed = errors.New("fsnotify: watcher already closed") ) -func (op Op) String() string { +func (o Op) String() string { var b strings.Builder - if op.Has(Create) { + if o.Has(Create) { b.WriteString("|CREATE") } - if op.Has(Remove) { + if o.Has(Remove) { b.WriteString("|REMOVE") } - if op.Has(Write) { + if o.Has(Write) { b.WriteString("|WRITE") } - if op.Has(Rename) { + if o.Has(Rename) { b.WriteString("|RENAME") } - if op.Has(Chmod) { + if o.Has(Chmod) { b.WriteString("|CHMOD") } if b.Len() == 0 { @@ -70,7 +93,7 @@ func (op Op) String() string { } // Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h == h } +func (o Op) Has(h Op) bool { return o&h != 0 } // Has reports if this event has the given operation. func (e Event) Has(op Op) bool { return e.Op.Has(op) } @@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } func (e Event) String() string { return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } + +type ( + addOpt func(opt *withOpts) + withOpts struct { + bufsize int + } +) + +var defaultOpts = withOpts{ + bufsize: 65536, // 64K +} + +func getOptions(opts ...addOpt) withOpts { + with := defaultOpts + for _, o := range opts { + o(&with) + } + return with +} + +// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. +// +// This only has effect on Windows systems, and is a no-op for other backends. +// +// The default value is 64K (65536 bytes) which is the highest value that works +// on all filesystems and should be enough for most applications, but if you +// have a large burst of events it may not be enough. You can increase it if +// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). +// +// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw +func WithBufferSize(bytes int) addOpt { + return func(opt *withOpts) { opt.bufsize = bytes } +} + +// Check if this path is recursive (ends with "/..." or "\..."), and return the +// path with the /... stripped. +func recursivePath(path string) (string, bool) { + if filepath.Base(path) == "..." { + return filepath.Dir(path), true + } + return path, false +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh index b09ef76834..99012ae653 100644 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -2,8 +2,8 @@ [ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 setopt err_exit no_unset pipefail extended_glob -# Simple script to update the godoc comments on all watchers. Probably took me -# more time to write this than doing it manually, but ah well 🙃 +# Simple script to update the godoc comments on all watchers so you don't need +# to update the same comment 5 times. 
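To make the preceding fsnotify API concrete, here is a minimal usage sketch against the functions introduced in this diff; the watched path, buffer sizes, and log messages are illustrative assumptions, not part of the upstream code:

package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Buffer the Events channel to 100 entries; on the Windows backend plain
	// NewWatcher() is now equivalent to NewBufferedWatcher(50).
	w, err := fsnotify.NewBufferedWatcher(100)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a directory (not individual files, per the Add docs above) and,
	// on Windows, raise the ReadDirectoryChangesW buffer from the 64K default.
	if err := w.AddWith("/some/dir", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if ev.Has(fsnotify.Write) {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			if errors.Is(err, fsnotify.ErrEventOverflow) {
				log.Println("overflow; consider a larger WithBufferSize")
				continue
			}
			log.Println("error:", err)
		}
	}
}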
watcher=$(< +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) +package. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -118,6 +142,91 @@ There are implementations for the following logging libraries: - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and +`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. `slogr` itself leaves that to the caller. + +## Using a `logr.Sink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. + +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +## Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `slogr.SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +## Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this +to fill this gap: + + func HandlerFromContext(ctx context.Context) slog.Handler { + logger, err := logr.FromContext(ctx) + if err == nil { + return slogr.NewSlogHandler(logger) + } + return slog.Default().Handler() + } + + func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { + return logr.NewContext(ctx, slogr.NewLogr(handler)) + } + +The downside is that storing and retrieving a `slog.Handler` needs more +allocations compared to using a `logr.Logger`. Therefore the recommendation is +to use the `logr.Logger` API in code which uses contextual logging. + ## FAQ ### Conceptual @@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 0000000000..1ca756fc7b --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. 
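As a concrete illustration of the slog-interoperability text above, a round trip between the two APIs via the new slogr package might look like the following sketch (assumes Go 1.21+; handler choice and messages are illustrative):

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/slogr"
)

func main() {
	// slog.Handler -> logr.Logger: logr V-levels are negated slog levels,
	// so V(4) corresponds to slog.LevelDebug.
	var logger logr.Logger = slogr.NewLogr(slog.NewTextHandler(os.Stderr, nil))
	logger.Info("info message", "key", 42)
	logger.V(4).Info("debug message") // emitted only if the handler enables LevelDebug

	// logr.Logger -> slog.Handler, wrapped back into the high-level slog API.
	slogger := slog.New(slogr.NewSlogHandler(logger))
	slogger.Warn("warning") // mapped to logr level 0; logr has no warning level
}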
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index e52f0cd01e..12e5807cc5 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -116,17 +116,17 @@ type Options struct { // Equivalent hooks are offered for key-value pairs saved via // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} + RenderBuiltinsHook func(kvList []any) []any // RenderValuesHook is the same as RenderBuiltinsHook, except that it is // only called for key-value pairs saved via logr.Logger.WithValues. See // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} + RenderValuesHook func(kvList []any) []any // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} + RenderArgsHook func(kvList []any) []any // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct // that contains a struct, etc.) it may log. Every time it finds a struct, @@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { return &l } -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { l.Formatter.AddValues(kvList) return &l } @@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { return &l } -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { +func (l fnlogger) Info(level int, msg string, kvList ...any) { prefix, args := l.FormatInfo(level, msg, kvList) l.write(prefix, args) } -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { +func (l fnlogger) Error(err error, msg string, kvList ...any) { prefix, args := l.FormatError(err, msg, kvList) l.write(prefix, args) } @@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { type Formatter struct { outputFormat outputFormat prefix string - values []interface{} + values []any valuesStr string depth int opts *Options @@ -246,10 +246,10 @@ const ( ) // PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} +type PseudoStruct []any // render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { +func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { @@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. 
if len(kvList)%2 != 0 { @@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b return kvList } -func (f Formatter) pretty(value interface{}) string { +func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -343,7 +343,7 @@ const ( ) // TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { +func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { if depth > f.opts.MaxLogDepth { return `"<max-log-depth-exceeded>"` } @@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool { return false } -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { +func invokeMarshaler(m logr.Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("<panic: %s>", r) } @@ -675,12 +675,12 @@ func (f Formatter) caller() Caller { const noValue = "<no-value>" -func (f Formatter) nonStringKey(v interface{}) string { +func (f Formatter) nonStringKey(v any) string { return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v interface{}) string { +func (f Formatter) snippet(v any) string { const snipLen = 16 snip := f.pretty(v) @@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string { // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { +func (f Formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } @@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int { // FormatInfo renders an Info log message into strings. The prefix will be // empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref } // FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is +// empty when no names were set (via AddNames), or when the output is // configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre args = append(args, "caller", f.caller()) } args = append(args, "msg", msg) - var loggableErr interface{} + var loggableErr any if err != nil { loggableErr = err.Error() } args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) + return prefix, f.render(args, kvList) } // AddName appends the specified name. funcr uses '/' characters to separate @@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) { // AddValues adds key-value pairs to the set of saved values to be logged with // each log line. -func (f *Formatter) AddValues(kvList []interface{}) { +func (f *Formatter) AddValues(kvList []any) { // Three slice args forces a copy. n := len(f.values) f.values = append(f.values[:n:n], kvList...) diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index e027aea3fd..2a5075a180 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -127,9 +127,9 @@ limitations under the License. // such a value can call its methods without having to check whether the // instance is ready for use. // -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. // // # Key Naming Conventions @@ -258,6 +258,12 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } @@ -267,11 +273,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. 
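The interface{} -> any hunks above belong to funcr, logr's reference LogSink implementation; before the Info implementation below, here is a short usage sketch tying the two together (the output function, names, and values are illustrative):

package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr turns a plain "print this string" function into a logr.LogSink.
	var logger logr.Logger = funcr.New(
		func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{Verbosity: 1},
	)

	logger = logger.WithName("app").WithValues("run", 17)
	logger.Info("started", "port", 8080)       // rendered via Formatter.FormatInfo
	logger.V(2).Info("dropped")                // above Verbosity 1, so discarded
	logger.Error(fmt.Errorf("boom"), "failed") // rendered via Formatter.FormatError
}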
-func (l Logger) Info(msg string, keysAndValues ...interface{}) { +func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } - if l.Enabled() { + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } @@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } @@ -467,15 +480,15 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. @@ -546,5 +559,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. - MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/slogr/sloghandler.go new file mode 100644 index 0000000000..ec6725ce2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/sloghandler.go @@ -0,0 +1,168 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink logr.LogSink + // Non-nil if sink is non-nil and implements SlogSink. 
+ slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because NewLogr needs +// access to the original sink without this adjustment. A second copy would +// work, but then WithAttrs would have to be called for both of them. +func (l *slogHandler) sinkWithCallDepth() logr.LogSink { + if sink, ok := l.sink.(logr.CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithAttrs(attrs) + copy.sink = copy.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + } + copy.sink = l.sink.WithValues(kvList...) 
+ } return &copy } + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithGroup(name) + copy.sink = l.slogSink + } else { + copy.groupPrefix = copy.addGroupPrefix(name) + } + return &copy } + +func (l *slogHandler) addGroupPrefix(name string) string { + if l.groupPrefix == "" { + return name + } + return l.groupPrefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a logr.LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original logr.Logger had a V level + if result < 0 { + result = 0 // because logr.LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 0000000000..eb519ae23f --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,108 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func NewLogr(handler slog.Handler) logr.Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return logr.Discard() + } + return logr.New(handler.sink).V(int(handler.levelBias)) + } + return logr.New(&slogSink{handler: handler}) } + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the logr.Logger: +// +// logger := <some logr.Logger with 0 as verbosity level> +// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the logr.Logger and the result is negated.
If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handle negative levels: + +// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func NewSlogHandler(logger logr.Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in NewLogr +// and NewSlogHandler. +type SlogSink interface { + logr.LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogr/slogsink.go new file mode 100644 index 0000000000..6fbac561d9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogsink.go @@ -0,0 +1,122 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + "runtime" + "time" + + "github.com/go-logr/logr" +) + +var ( + _ logr.LogSink = &slogSink{} + _ logr.CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewLogr. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) logr.LogSink { + if l.name != "" { + l.name = l.name + "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) + attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go new file mode 100644 index 0000000000..4f26e92347 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/auth_info.go @@ -0,0 +1,77 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "encoding/base64" + + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// PassThroughAuth never manipulates the request +var PassThroughAuth runtime.ClientAuthInfoWriter + +func init() { + PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) +} + +// BasicAuth provides a basic auth info writer +func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + return r.SetHeaderParam(runtime.HeaderAuthorization, "Basic "+encoded) + }) +} + +// APIKeyAuth provides an API key auth info writer +func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { + if in == "query" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetQueryParam(name, value) + }) + } + + if in == "header" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam(name, value) + }) + } + return nil +} + +// BearerToken provides a header based oauth2 bearer access token auth info writer +func BearerToken(token string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token) + }) +} + +// Compose combines multiple ClientAuthInfoWriters into a single one. +// Useful when multiple auth headers are needed. +func Compose(auths ...runtime.ClientAuthInfoWriter) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + for _, auth := range auths { + if auth == nil { + continue + } + if err := auth.AuthenticateRequest(r, nil); err != nil { + return err + } + } + return nil + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go new file mode 100644 index 0000000000..bc7b7fa418 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go @@ -0,0 +1,55 @@ +package client + +import ( + "io" + "net/http" + "sync/atomic" +) + +// KeepAliveTransport drains the remaining body from a response +// so that go will reuse the TCP connections. +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as a http client middleware that can be used to override +// any request. 
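Taken together with auth_info.go above, the keepalive wrapper might be wired up like this (a sketch; host, credentials, and header name are assumptions — the KeepAliveTransport declaration itself follows below):

package main

import (
	"net/http"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Opt in to connection reuse by draining response bodies, as described
	// in the comment above.
	hc := &http.Client{
		Transport: httptransport.KeepAliveTransport(http.DefaultTransport),
	}
	_ = hc

	// Combine several auth writers; Compose skips nil entries, so a
	// misconfigured APIKeyAuth (unknown "in" value) is tolerated.
	auth := httptransport.Compose(
		httptransport.BasicAuth("user", "secret"),
		httptransport.APIKeyAuth("X-API-Key", "header", "abc123"),
	)
	_ = auth // passed as the auth info of a generated client operation
}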
+func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { + return &keepAliveTransport{wrapped: rt} +} + +type keepAliveTransport struct { + wrapped http.RoundTripper +} + +func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := k.wrapped.RoundTrip(r) + if err != nil { + return resp, err + } + resp.Body = &drainingReadCloser{rdr: resp.Body} + return resp, nil +} + +type drainingReadCloser struct { + rdr io.ReadCloser + seenEOF uint32 +} + +func (d *drainingReadCloser) Read(p []byte) (n int, err error) { + n, err = d.rdr.Read(p) + if err == io.EOF || n == 0 { + atomic.StoreUint32(&d.seenEOF, 1) + } + return +} + +func (d *drainingReadCloser) Close() error { + // drain buffer + if atomic.LoadUint32(&d.seenEOF) != 1 { + // If the reader side (an HTTP server) is misbehaving, it still may send + // some bytes, but the closer ignores them to keep the underlying + // connection open. + //nolint:errcheck + io.Copy(io.Discard, d.rdr) + } + return d.rdr.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go new file mode 100644 index 0000000000..8a38ea3e94 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go @@ -0,0 +1,207 @@ +package client + +import ( + "fmt" + "net/http" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + instrumentationVersion = "1.0.0" + tracerName = "go-openapi" +) + +type config struct { + Tracer trace.Tracer + Propagator propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + SpanNameFormatter func(*runtime.ClientOperation) string + TracerProvider trace.TracerProvider +} + +type OpenTelemetryOpt interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) OpenTelemetryOpt { + return optionFunc(func(c *config) { + if provider != nil { + c.TracerProvider = provider + } + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) OpenTelemetryOpt { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagator = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) OpenTelemetryOpt { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name.
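The option types above follow Go's functional-options pattern: each option is a function mutating *config. As a sketch of how they compose inside this package (exampleTransport and the naming scheme are hypothetical; the WithSpanNameFormatter declaration continues below):

package client

import (
	"go.opentelemetry.io/otel"

	"github.com/go-openapi/runtime"
)

// exampleTransport is a hypothetical helper, not part of the real file; it
// shows the options being applied by the package-internal constructor.
func exampleTransport(inner runtime.ClientTransport, host string) runtime.ClientTransport {
	opts := []OpenTelemetryOpt{
		WithTracerProvider(otel.GetTracerProvider()),
		WithSpanNameFormatter(func(op *runtime.ClientOperation) string {
			if op.ID != "" {
				return "client." + op.ID
			}
			return op.Method + " " + op.PathPattern
		}),
	}
	return newOpenTelemetryTransport(inner, host, opts)
}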
+func WithSpanNameFormatter(f func(op *runtime.ClientOperation) string) OpenTelemetryOpt { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +func defaultTransportFormatter(op *runtime.ClientOperation) string { + if op.ID != "" { + return op.ID + } + + return fmt.Sprintf("%s_%s", strings.ToLower(op.Method), op.PathPattern) +} + +type openTelemetryTransport struct { + transport runtime.ClientTransport + host string + tracer trace.Tracer + config *config +} + +func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, opts []OpenTelemetryOpt) *openTelemetryTransport { + tr := &openTelemetryTransport{ + transport: transport, + host: host, + } + + defaultOpts := []OpenTelemetryOpt{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + WithPropagators(otel.GetTextMapPropagator()), + WithTracerProvider(otel.GetTracerProvider()), + } + + c := newConfig(append(defaultOpts, opts...)...) + tr.config = c + + return tr +} + +func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (interface{}, error) { + if op.Context == nil { + return t.transport.Submit(op) + } + + params := op.Params + reader := op.Reader + + var span trace.Span + defer func() { + if span != nil { + span.End() + } + }() + + op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { + span = t.newOpenTelemetrySpan(op, req.GetHeaderParams()) + return params.WriteToRequest(req, reg) + }) + + op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + if span != nil { + statusCode := response.Code() + span.SetAttributes(attribute.Int(string(semconv.HTTPStatusCodeKey), statusCode)) + span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindClient)) + } + + return reader.ReadResponse(response, consumer) + }) + + submit, err := t.transport.Submit(op) + if err != nil && span != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + + return submit, err +} + +func (t *openTelemetryTransport) newOpenTelemetrySpan(op *runtime.ClientOperation, header http.Header) trace.Span { + ctx := op.Context + + tracer := t.tracer + if tracer == nil { + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, t.config.SpanNameFormatter(op), t.config.SpanStartOptions...) + + var scheme string + if len(op.Schemes) > 0 { + scheme = op.Schemes[0] + } + + span.SetAttributes( + attribute.String("net.peer.name", t.host), + attribute.String(string(semconv.HTTPRouteKey), op.PathPattern), + attribute.String(string(semconv.HTTPMethodKey), op.Method), + attribute.String("span.kind", trace.SpanKindClient.String()), + attribute.String("http.scheme", scheme), + ) + + carrier := propagation.HeaderCarrier(header) + t.config.Propagator.Inject(ctx, carrier) + + return span +} + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(tracerName, trace.WithInstrumentationVersion(version())) +} + +func newConfig(opts ...OpenTelemetryOpt) *config { + c := &config{ + Propagator: otel.GetTextMapPropagator(), + } + + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. 
+ if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + return c +} + +// Version is the current release version of the go-runtime instrumentation. +func version() string { + return instrumentationVersion +} diff --git a/vendor/github.com/go-openapi/runtime/client/opentracing.go b/vendor/github.com/go-openapi/runtime/client/opentracing.go new file mode 100644 index 0000000000..627286d12f --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/opentracing.go @@ -0,0 +1,99 @@ +package client + +import ( + "fmt" + "net/http" + + "github.com/go-openapi/strfmt" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + + "github.com/go-openapi/runtime" +) + +type tracingTransport struct { + transport runtime.ClientTransport + host string + opts []opentracing.StartSpanOption +} + +func newOpenTracingTransport(transport runtime.ClientTransport, host string, opts []opentracing.StartSpanOption, +) runtime.ClientTransport { + return &tracingTransport{ + transport: transport, + host: host, + opts: opts, + } +} + +func (t *tracingTransport) Submit(op *runtime.ClientOperation) (interface{}, error) { + if op.Context == nil { + return t.transport.Submit(op) + } + + params := op.Params + reader := op.Reader + + var span opentracing.Span + defer func() { + if span != nil { + span.Finish() + } + }() + + op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { + span = createClientSpan(op, req.GetHeaderParams(), t.host, t.opts) + return params.WriteToRequest(req, reg) + }) + + op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + if span != nil { + code := response.Code() + ext.HTTPStatusCode.Set(span, uint16(code)) + if code >= 400 { + ext.Error.Set(span, true) + } + } + return reader.ReadResponse(response, consumer) + }) + + submit, err := t.transport.Submit(op) + if err != nil && span != nil { + ext.Error.Set(span, true) + span.LogFields(log.Error(err)) + } + return submit, err +} + +func createClientSpan(op *runtime.ClientOperation, header http.Header, host string, + opts []opentracing.StartSpanOption) opentracing.Span { + ctx := op.Context + span := opentracing.SpanFromContext(ctx) + + if span != nil { + opts = append(opts, ext.SpanKindRPCClient) + span, _ = opentracing.StartSpanFromContextWithTracer( + ctx, span.Tracer(), operationName(op), opts...) + + ext.Component.Set(span, "go-openapi") + ext.PeerHostname.Set(span, host) + span.SetTag("http.path", op.PathPattern) + ext.HTTPMethod.Set(span, op.Method) + + _ = span.Tracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(header)) + + return span + } + return nil +} + +func operationName(op *runtime.ClientOperation) string { + if op.ID != "" { + return op.ID + } + return fmt.Sprintf("%s_%s", op.Method, op.PathPattern) +} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go new file mode 100644 index 0000000000..4c00ed3a5a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -0,0 +1,481 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+
+	"github.com/go-openapi/runtime"
+)
+
+// newRequest creates a new swagger http client request
+func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) {
+	return &request{
+		pathPattern: pathPattern,
+		method:      method,
+		writer:      writer,
+		header:      make(http.Header),
+		query:       make(url.Values),
+		timeout:     DefaultTimeout,
+		getBody:     getRequestBuffer,
+	}, nil
+}
+
+// request represents a swagger client request.
+//
+// This request struct converts to an HTTP request.
+// There might be others that convert to other transports.
+// There is no error checking here, it is assumed to be used after a spec has been validated,
+// so impossible combinations should not arise (hopefully).
+//
+// The main purpose of this struct is to hide the machinery of adding params to a transport request.
+// The generated code only implements what is necessary to turn a param into a valid value for these methods.
+type request struct {
+	pathPattern string
+	method      string
+	writer      runtime.ClientRequestWriter
+
+	pathParams map[string]string
+	header     http.Header
+	query      url.Values
+	formFields url.Values
+	fileFields map[string][]runtime.NamedReadCloser
+	payload    interface{}
+	timeout    time.Duration
+	buf        *bytes.Buffer
+
+	getBody func(r *request) []byte
+}
+
+var (
+	// ensure interface compliance
+	_ runtime.ClientRequest = new(request)
+)
+
+func (r *request) isMultipart(mediaType string) bool {
+	if len(r.fileFields) > 0 {
+		return true
+	}
+
+	return runtime.MultipartFormMime == mediaType
+}
+
+// BuildHTTP creates a new http request based on the data from the params
+func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) {
+	return r.buildHTTP(mediaType, basePath, producers, registry, nil)
+}
+func escapeQuotes(s string) string {
+	return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s)
+}
+
+func logClose(err error, pw *io.PipeWriter) {
+	log.Println(err)
+	closeErr := pw.CloseWithError(err)
+	if closeErr != nil {
+		log.Println(closeErr)
+	}
+}
+
+func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) {
+	// build the data
+	if err := r.writer.WriteToRequest(r, registry); err != nil {
+		return nil, err
+	}
+
+	// Our body must be an io.Reader.
+	// When we create the http.Request, if we pass it a
+	// bytes.Buffer then it will wrap it in an io.ReadCloser
+	// and set the content length automatically.
+ var body io.Reader + var pr *io.PipeReader + var pw *io.PipeWriter + + r.buf = bytes.NewBuffer(nil) + if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { + body = r.buf + if r.isMultipart(mediaType) { + pr, pw = io.Pipe() + body = pr + } + } + + // check if this is a form type request + if len(r.formFields) > 0 || len(r.fileFields) > 0 { + if !r.isMultipart(mediaType) { + r.header.Set(runtime.HeaderContentType, mediaType) + formString := r.formFields.Encode() + r.buf.WriteString(formString) + goto DoneChoosingBodySource + } + + mp := multipart.NewWriter(pw) + r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) + + go func() { + defer func() { + mp.Close() + pw.Close() + }() + + for fn, v := range r.formFields { + for _, vi := range v { + if err := mp.WriteField(fn, vi); err != nil { + logClose(err, pw) + return + } + } + } + + defer func() { + for _, ff := range r.fileFields { + for _, ffi := range ff { + ffi.Close() + } + } + }() + for fn, f := range r.fileFields { + for _, fi := range f { + var fileContentType string + if p, ok := fi.(interface { + ContentType() string + }); ok { + fileContentType = p.ContentType() + } else { + // Need to read the data so that we can detect the content type + buf := make([]byte, 512) + size, err := fi.Read(buf) + if err != nil { + logClose(err, pw) + return + } + fileContentType = http.DetectContentType(buf) + fi = runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi)) + } + + // Create the MIME headers for the new part + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name())))) + h.Set("Content-Type", fileContentType) + + wrtr, err := mp.CreatePart(h) + if err != nil { + logClose(err, pw) + return + } + if _, err := io.Copy(wrtr, fi); err != nil { + logClose(err, pw) + } + } + } + }() + + goto DoneChoosingBodySource + } + + // if there is payload, use the producer to write the payload, and then + // set the header to the content-type appropriate for the payload produced + if r.payload != nil { + // TODO: infer most appropriate content type based on the producer used, + // and the `consumers` section of the spec/operation + r.header.Set(runtime.HeaderContentType, mediaType) + if rdr, ok := r.payload.(io.ReadCloser); ok { + body = rdr + goto DoneChoosingBodySource + } + + if rdr, ok := r.payload.(io.Reader); ok { + body = rdr + goto DoneChoosingBodySource + } + + producer := producers[mediaType] + if err := producer.Produce(r.buf, r.payload); err != nil { + return nil, err + } + } + +DoneChoosingBodySource: + + if runtime.CanHaveBody(r.method) && body != nil && r.header.Get(runtime.HeaderContentType) == "" { + r.header.Set(runtime.HeaderContentType, mediaType) + } + + if auth != nil { + // If we're not using r.buf as our http.Request's body, + // either the payload is an io.Reader or io.ReadCloser, + // or we're doing a multipart form/file. + // + // In those cases, if the AuthenticateRequest call asks for the body, + // we must read it into a buffer and provide that, then use that buffer + // as the body of our http.Request. + // + // This is done in-line with the GetBody() request rather than ahead + // of time, because there's no way to know if the AuthenticateRequest + // will even ask for the body of the request. + // + // If for some reason the copy fails, there's no way to return that + // error to the GetBody() call, so return it afterwards. 
+ // + // An error from the copy action is prioritized over any error + // from the AuthenticateRequest call, because the mis-read + // body may have interfered with the auth. + // + var copyErr error + if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) { + var copied bool + r.getBody = func(r *request) []byte { + if copied { + return getRequestBuffer(r) + } + + defer func() { + copied = true + }() + + if _, copyErr = io.Copy(r.buf, body); copyErr != nil { + return nil + } + + if closer, ok := body.(io.ReadCloser); ok { + if copyErr = closer.Close(); copyErr != nil { + return nil + } + } + + body = r.buf + return getRequestBuffer(r) + } + } + + authErr := auth.AuthenticateRequest(r, registry) + + if copyErr != nil { + return nil, fmt.Errorf("error retrieving the response body: %v", copyErr) + } + + if authErr != nil { + return nil, authErr + } + } + + // In case the basePath or the request pathPattern include static query parameters, + // parse those out before constructing the final path. The parameters themselves + // will be merged with the ones set by the client, with the priority given first to + // the ones set by the client, then the path pattern, and lastly the base path. + basePathURL, err := url.Parse(basePath) + if err != nil { + return nil, err + } + staticQueryParams := basePathURL.Query() + + pathPatternURL, err := url.Parse(r.pathPattern) + if err != nil { + return nil, err + } + for name, values := range pathPatternURL.Query() { + if _, present := staticQueryParams[name]; present { + staticQueryParams.Del(name) + } + for _, value := range values { + staticQueryParams.Add(name, value) + } + } + + // create http request + var reinstateSlash bool + if pathPatternURL.Path != "" && pathPatternURL.Path != "/" && pathPatternURL.Path[len(pathPatternURL.Path)-1] == '/' { + reinstateSlash = true + } + + urlPath := path.Join(basePathURL.Path, pathPatternURL.Path) + for k, v := range r.pathParams { + urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1) + } + if reinstateSlash { + urlPath = urlPath + "/" + } + + req, err := http.NewRequest(r.method, urlPath, body) + if err != nil { + return nil, err + } + + originalParams := r.GetQueryParams() + + // Merge the query parameters extracted from the basePath with the ones set by + // the client in this struct. In case of conflict, the client wins. + for k, v := range staticQueryParams { + _, present := originalParams[k] + if !present { + if err = r.SetQueryParam(k, v...); err != nil { + return nil, err + } + } + } + + req.URL.RawQuery = r.query.Encode() + req.Header = r.header + + return req, nil +} + +func mangleContentType(mediaType, boundary string) string { + if strings.ToLower(mediaType) == runtime.URLencodedFormMime { + return fmt.Sprintf("%s; boundary=%s", mediaType, boundary) + } + return "multipart/form-data; boundary=" + boundary +} + +func (r *request) GetMethod() string { + return r.method +} + +func (r *request) GetPath() string { + path := r.pathPattern + for k, v := range r.pathParams { + path = strings.Replace(path, "{"+k+"}", v, -1) + } + return path +} + +func (r *request) GetBody() []byte { + return r.getBody(r) +} + +func getRequestBuffer(r *request) []byte { + if r.buf == nil { + return nil + } + return r.buf.Bytes() +} + +// SetHeaderParam adds a header param to the request +// when there is only 1 value provided for the varargs, it will set it. 
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetHeaderParam(name string, values ...string) error {
+	if r.header == nil {
+		r.header = make(http.Header)
+	}
+	r.header[http.CanonicalHeaderKey(name)] = values
+	return nil
+}
+
+// GetHeaderParams returns all the headers currently set for the request
+func (r *request) GetHeaderParams() http.Header {
+	return r.header
+}
+
+// SetQueryParam adds a query param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetQueryParam(name string, values ...string) error {
+	if r.query == nil {
+		r.query = make(url.Values)
+	}
+	r.query[name] = values
+	return nil
+}
+
+// GetQueryParams returns a copy of all query params currently set for the request
+func (r *request) GetQueryParams() url.Values {
+	var result = make(url.Values)
+	for key, value := range r.query {
+		result[key] = append([]string{}, value...)
+	}
+	return result
+}
+
+// SetFormParam adds a form param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetFormParam(name string, values ...string) error {
+	if r.formFields == nil {
+		r.formFields = make(url.Values)
+	}
+	r.formFields[name] = values
+	return nil
+}
+
+// SetPathParam adds a path param to the request
+func (r *request) SetPathParam(name string, value string) error {
+	if r.pathParams == nil {
+		r.pathParams = make(map[string]string)
+	}
+
+	r.pathParams[name] = value
+	return nil
+}
+
+// SetFileParam adds a file param to the request
+func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error {
+	for _, file := range files {
+		if actualFile, ok := file.(*os.File); ok {
+			fi, err := os.Stat(actualFile.Name())
+			if err != nil {
+				return err
+			}
+			if fi.IsDir() {
+				return fmt.Errorf("%q is a directory, only files are supported", file.Name())
+			}
+		}
+	}
+
+	if r.fileFields == nil {
+		r.fileFields = make(map[string][]runtime.NamedReadCloser)
+	}
+	if r.formFields == nil {
+		r.formFields = make(url.Values)
+	}
+
+	r.fileFields[name] = files
+	return nil
+}
+
+func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser {
+	return r.fileFields
+}
+
+// SetBodyParam sets a body parameter on the request.
+// This does not yet serialize the object; that happens as late as possible.
+func (r *request) SetBodyParam(payload interface{}) error {
+	r.payload = payload
+	return nil
+}
+
+func (r *request) GetBodyParam() interface{} {
+	return r.payload
+}
+
+// SetTimeout sets the timeout for a request
+func (r *request) SetTimeout(timeout time.Duration) error {
+	r.timeout = timeout
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go
new file mode 100644
index 0000000000..0bbd388bc8
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/response.go
@@ -0,0 +1,50 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "io" + "net/http" + + "github.com/go-openapi/runtime" +) + +var _ runtime.ClientResponse = response{} + +func newResponse(resp *http.Response) runtime.ClientResponse { return response{resp: resp} } + +type response struct { + resp *http.Response +} + +func (r response) Code() int { + return r.resp.StatusCode +} + +func (r response) Message() string { + return r.resp.Status +} + +func (r response) GetHeader(name string) string { + return r.resp.Header.Get(name) +} + +func (r response) GetHeaders(name string) []string { + return r.resp.Header.Values(name) +} + +func (r response) Body() io.ReadCloser { + return r.resp.Body +} diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go new file mode 100644 index 0000000000..ccec04138b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -0,0 +1,537 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "mime" + "net/http" + "net/http/httputil" + "os" + "strings" + "sync" + "time" + + "github.com/opentracing/opentracing-go" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/runtime/yamlpc" + "github.com/go-openapi/strfmt" +) + +// TLSClientOptions to configure client authentication with mutual TLS +type TLSClientOptions struct { + // Certificate is the path to a PEM-encoded certificate to be used for + // client authentication. If set then Key must also be set. + Certificate string + + // LoadedCertificate is the certificate to be used for client authentication. + // This field is ignored if Certificate is set. If this field is set, LoadedKey + // is also required. + LoadedCertificate *x509.Certificate + + // Key is the path to an unencrypted PEM-encoded private key for client + // authentication. This field is required if Certificate is set. + Key string + + // LoadedKey is the key for client authentication. This field is required if + // LoadedCertificate is set. + LoadedKey crypto.PrivateKey + + // CA is a path to a PEM-encoded certificate that specifies the root certificate + // to use when validating the TLS certificate presented by the server. If this field + // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA + // is set. 
+	CA string
+
+	// LoadedCA specifies the root certificate to use when validating the server's TLS certificate.
+	// If this field (and CA) is not set, the system certificate pool is used.
+	LoadedCA *x509.Certificate
+
+	// LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate.
+	// If set, it will be combined with the other loaded certificates (see LoadedCA and CA).
+	// If neither LoadedCA nor CA is set, the provided pool will override the system
+	// certificate pool.
+	// The caller must not use the supplied pool after calling TLSClientAuth.
+	LoadedCAPool *x509.CertPool
+
+	// ServerName specifies the hostname to use when verifying the server certificate.
+	// If this field is set then InsecureSkipVerify will be ignored and treated as
+	// false.
+	ServerName string
+
+	// InsecureSkipVerify controls whether the certificate chain and hostname presented
+	// by the server are validated. If true, any certificate is accepted.
+	InsecureSkipVerify bool
+
+	// VerifyPeerCertificate, if not nil, is called after normal
+	// certificate verification. It receives the raw ASN.1 certificates
+	// provided by the peer and also any verified chains that normal processing found.
+	// If it returns a non-nil error, the handshake is aborted and that error results.
+	//
+	// If normal verification fails then the handshake will abort before
+	// considering this callback. If normal verification is disabled by
+	// setting InsecureSkipVerify then this callback will be considered but
+	// the verifiedChains argument will always be nil.
+	VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+	// SessionTicketsDisabled may be set to true to disable session ticket and
+	// PSK (resumption) support. Note that on clients, session ticket support is
+	// also disabled if ClientSessionCache is nil.
+	SessionTicketsDisabled bool
+
+	// ClientSessionCache is a cache of ClientSessionState entries for TLS
+	// session resumption. It is only used by clients.
+	ClientSessionCache tls.ClientSessionCache
+
+	// Prevents callers using unkeyed fields.
+	_ struct{}
+}
+
+// TLSClientAuth creates a tls.Config for mutual auth
+func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
+	// create client tls config
+	cfg := &tls.Config{}
+
+	// load client cert if specified
+	if opts.Certificate != "" {
+		cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key)
+		if err != nil {
+			return nil, fmt.Errorf("tls client cert: %v", err)
+		}
+		cfg.Certificates = []tls.Certificate{cert}
+	} else if opts.LoadedCertificate != nil {
+		block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw}
+		certPem := pem.EncodeToMemory(&block)
+
+		var keyBytes []byte
+		switch k := opts.LoadedKey.(type) {
+		case *rsa.PrivateKey:
+			keyBytes = x509.MarshalPKCS1PrivateKey(k)
+		case *ecdsa.PrivateKey:
+			var err error
+			keyBytes, err = x509.MarshalECPrivateKey(k)
+			if err != nil {
+				return nil, fmt.Errorf("tls client priv key: %v", err)
+			}
+		default:
+			return nil, fmt.Errorf("tls client priv key: unsupported key type")
+		}
+
+		block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}
+		keyPem := pem.EncodeToMemory(&block)
+
+		cert, err := tls.X509KeyPair(certPem, keyPem)
+		if err != nil {
+			return nil, fmt.Errorf("tls client cert: %v", err)
+		}
+		cfg.Certificates = []tls.Certificate{cert}
+	}
+
+	cfg.InsecureSkipVerify = opts.InsecureSkipVerify
+
+	cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate
+	cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled
+	cfg.ClientSessionCache = opts.ClientSessionCache
+
+	// When no CA certificate is provided, default to the system cert pool.
+	// That way, when a request is made to a server known by the system trust store,
+	// the name is still verified.
+	if opts.LoadedCA != nil {
+		caCertPool := basePool(opts.LoadedCAPool)
+		caCertPool.AddCert(opts.LoadedCA)
+		cfg.RootCAs = caCertPool
+	} else if opts.CA != "" {
+		// load ca cert
+		caCert, err := os.ReadFile(opts.CA)
+		if err != nil {
+			return nil, fmt.Errorf("tls client ca: %v", err)
+		}
+		caCertPool := basePool(opts.LoadedCAPool)
+		caCertPool.AppendCertsFromPEM(caCert)
+		cfg.RootCAs = caCertPool
+	} else if opts.LoadedCAPool != nil {
+		cfg.RootCAs = opts.LoadedCAPool
+	}
+
+	// apply server name override
+	if opts.ServerName != "" {
+		cfg.InsecureSkipVerify = false
+		cfg.ServerName = opts.ServerName
+	}
+
+	return cfg, nil
+}
+
+func basePool(pool *x509.CertPool) *x509.CertPool {
+	if pool == nil {
+		return x509.NewCertPool()
+	}
+	return pool
+}
+
+// TLSTransport creates an http client transport suitable for mutual tls auth
+func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) {
+	cfg, err := TLSClientAuth(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return &http.Transport{TLSClientConfig: cfg}, nil
+}
+
+// TLSClient creates an http.Client for mutual auth
+func TLSClient(opts TLSClientOptions) (*http.Client, error) {
+	transport, err := TLSTransport(opts)
+	if err != nil {
+		return nil, err
+	}
+	return &http.Client{Transport: transport}, nil
+}
+
+// DefaultTimeout is the default request timeout
+var DefaultTimeout = 30 * time.Second
+
+// Runtime represents an API client that uses the transport
+// to make http requests based on a swagger specification.
+type Runtime struct {
+	DefaultMediaType      string
+	DefaultAuthentication runtime.ClientAuthInfoWriter
+	Consumers             map[string]runtime.Consumer
+	Producers             map[string]runtime.Producer
+
+	Transport http.RoundTripper
+	Jar       http.CookieJar
+	// Spec *spec.Document
+	Host     string
+	BasePath string
+	Formats  strfmt.Registry
+	Context  context.Context
+
+	Debug  bool
+	logger logger.Logger
+
+	clientOnce *sync.Once
+	client     *http.Client
+	schemes    []string
+	response   ClientResponseFunc
+}
+
+// New creates a new default runtime for a swagger api runtime.Client
+func New(host, basePath string, schemes []string) *Runtime {
+	var rt Runtime
+	rt.DefaultMediaType = runtime.JSONMime
+
+	// TODO: actually infer this stuff from the spec
+	rt.Consumers = map[string]runtime.Consumer{
+		runtime.YAMLMime:    yamlpc.YAMLConsumer(),
+		runtime.JSONMime:    runtime.JSONConsumer(),
+		runtime.XMLMime:     runtime.XMLConsumer(),
+		runtime.TextMime:    runtime.TextConsumer(),
+		runtime.HTMLMime:    runtime.TextConsumer(),
+		runtime.CSVMime:     runtime.CSVConsumer(),
+		runtime.DefaultMime: runtime.ByteStreamConsumer(),
+	}
+	rt.Producers = map[string]runtime.Producer{
+		runtime.YAMLMime:    yamlpc.YAMLProducer(),
+		runtime.JSONMime:    runtime.JSONProducer(),
+		runtime.XMLMime:     runtime.XMLProducer(),
+		runtime.TextMime:    runtime.TextProducer(),
+		runtime.HTMLMime:    runtime.TextProducer(),
+		runtime.CSVMime:     runtime.CSVProducer(),
+		runtime.DefaultMime: runtime.ByteStreamProducer(),
+	}
+	rt.Transport = http.DefaultTransport
+	rt.Jar = nil
+	rt.Host = host
+	rt.BasePath = basePath
+	rt.Context = context.Background()
+	rt.clientOnce = new(sync.Once)
+	if !strings.HasPrefix(rt.BasePath, "/") {
+		rt.BasePath = "/" + rt.BasePath
+	}
+
+	rt.Debug = logger.DebugEnabled()
+	rt.logger = logger.StandardLogger{}
+	rt.response = newResponse
+
+	if len(schemes) > 0 {
+		rt.schemes = schemes
+	}
+	return &rt
+}
+
+// NewWithClient allows you to create a new transport with a configured http.Client
+func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime {
+	rt := New(host, basePath, schemes)
+	if client != nil {
+		rt.clientOnce.Do(func() {
+			rt.client = client
+		})
+	}
+	return rt
+}
+
+// WithOpenTracing adds opentracing support to the provided runtime.
+// A new client span is created for each request.
+// If the context of the client operation does not contain an active span, no span is created.
+// The provided opts are applied to each span - for example to add global tags.
+func (r *Runtime) WithOpenTracing(opts ...opentracing.StartSpanOption) runtime.ClientTransport {
+	return newOpenTracingTransport(r, r.Host, opts)
+}
+
+// WithOpenTelemetry adds opentelemetry support to the provided runtime.
+// A new client span is created for each request.
+// If the context of the client operation does not contain an active span, no span is created.
+// The provided opts are applied to each span - for example to add global tags.
+func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTransport {
+	return newOpenTelemetryTransport(r, r.Host, opts)
+}
+
+func (r *Runtime) pickScheme(schemes []string) string {
+	if v := r.selectScheme(r.schemes); v != "" {
+		return v
+	}
+	if v := r.selectScheme(schemes); v != "" {
+		return v
+	}
+	return "http"
+}
+
+func (r *Runtime) selectScheme(schemes []string) string {
+	schLen := len(schemes)
+	if schLen == 0 {
+		return ""
+	}
+
+	scheme := schemes[0]
+	// prefer https, but skip when not possible
+	if scheme != "https" && schLen > 1 {
+		for _, sch := range schemes {
+			if sch == "https" {
+				scheme = sch
+				break
+			}
+		}
+	}
+	return scheme
+}
+
+func transportOrDefault(left, right http.RoundTripper) http.RoundTripper {
+	if left == nil {
+		return right
+	}
+	return left
+}
+
+// EnableConnectionReuse drains the remaining body from a response
+// so that Go will reuse the TCP connections.
+//
+// This is not enabled by default because there are servers where
+// the response never gets closed and that would make the code hang forever.
+// So instead it's provided as an http client middleware that can be used to override
+// any request.
+func (r *Runtime) EnableConnectionReuse() {
+	if r.client == nil {
+		r.Transport = KeepAliveTransport(
+			transportOrDefault(r.Transport, http.DefaultTransport),
+		)
+		return
+	}
+
+	r.client.Transport = KeepAliveTransport(
+		transportOrDefault(r.client.Transport,
+			transportOrDefault(r.Transport, http.DefaultTransport),
+		),
+	)
+}
+
+// takes a client operation and creates an equivalent http.Request
+func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) {
+	params, _, auth := operation.Params, operation.Reader, operation.AuthInfo
+
+	request, err := newRequest(operation.Method, operation.PathPattern, params)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var accept []string
+	accept = append(accept, operation.ProducesMediaTypes...)
+	if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
+		return nil, nil, err
+	}
+
+	if auth == nil && r.DefaultAuthentication != nil {
+		auth = runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
+			if req.GetHeaderParams().Get(runtime.HeaderAuthorization) != "" {
+				return nil
+			}
+			return r.DefaultAuthentication.AuthenticateRequest(req, reg)
+		})
+	}
+	// if auth != nil {
+	//	if err := auth.AuthenticateRequest(request, r.Formats); err != nil {
+	//		return nil, err
+	//	}
+	//}
+
+	// TODO: pick appropriate media type
+	cmt := r.DefaultMediaType
+	for _, mediaType := range operation.ConsumesMediaTypes {
+		// Pick first non-empty media type
+		if mediaType != "" {
+			cmt = mediaType
+			break
+		}
+	}
+
+	if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime {
+		return nil, nil, fmt.Errorf("none of producers: %v registered. 
try %s", r.Producers, cmt) + } + + req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth) + if err != nil { + return nil, nil, err + } + req.URL.Scheme = r.pickScheme(operation.Schemes) + req.URL.Host = r.Host + req.Host = r.Host + return request, req, nil +} + +func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) { + _, req, err = r.createHttpRequest(operation) + return +} + +// Submit a request and when there is a body on success it will turn that into the result +// all other things are turned into an api error for swagger which retains the status code +func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) { + _, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo + + request, req, err := r.createHttpRequest(operation) + if err != nil { + return nil, err + } + + r.clientOnce.Do(func() { + r.client = &http.Client{ + Transport: r.Transport, + Jar: r.Jar, + } + }) + + if r.Debug { + b, err2 := httputil.DumpRequestOut(req, true) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + var hasTimeout bool + pctx := operation.Context + if pctx == nil { + pctx = r.Context + } else { + hasTimeout = true + } + if pctx == nil { + pctx = context.Background() + } + var ctx context.Context + var cancel context.CancelFunc + if hasTimeout { + ctx, cancel = context.WithCancel(pctx) + } else { + ctx, cancel = context.WithTimeout(pctx, request.timeout) + } + defer cancel() + + client := operation.Client + if client == nil { + client = r.client + } + req = req.WithContext(ctx) + res, err := client.Do(req) // make requests, by default follows 10 redirects before failing + if err != nil { + return nil, err + } + defer res.Body.Close() + + ct := res.Header.Get(runtime.HeaderContentType) + if ct == "" { // this should really really never occur + ct = r.DefaultMediaType + } + + if r.Debug { + printBody := true + if ct == runtime.DefaultMime { + printBody = false // Spare the terminal from a binary blob. + } + b, err2 := httputil.DumpResponse(res, printBody) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + mt, _, err := mime.ParseMediaType(ct) + if err != nil { + return nil, fmt.Errorf("parse content type: %s", err) + } + + cons, ok := r.Consumers[mt] + if !ok { + if cons, ok = r.Consumers["*/*"]; !ok { + // scream about not knowing what to do + return nil, fmt.Errorf("no consumer: %q", ct) + } + } + return readResponse.ReadResponse(r.response(res), cons) +} + +// SetDebug changes the debug flag. +// It ensures that client and middlewares have the set debug level. +func (r *Runtime) SetDebug(debug bool) { + r.Debug = debug + middleware.Debug = debug +} + +// SetLogger changes the logger stream. +// It ensures that client and middlewares use the same logger. +func (r *Runtime) SetLogger(logger logger.Logger) { + r.logger = logger + middleware.Logger = logger +} + +type ClientResponseFunc = func(*http.Response) runtime.ClientResponse + +// SetResponseReader changes the response reader implementation. 
+func (r *Runtime) SetResponseReader(f ClientResponseFunc) { + if f == nil { + return + } + r.response = f +} diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go new file mode 100644 index 0000000000..b30d377126 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -0,0 +1,40 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yamlpc + +import ( + "io" + + "github.com/go-openapi/runtime" + + "gopkg.in/yaml.v2" +) + +// YAMLConsumer creates a consumer for yaml data +func YAMLConsumer() runtime.Consumer { + return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error { + dec := yaml.NewDecoder(r) + return dec.Decode(v) + }) +} + +// YAMLProducer creates a producer for yaml data +func YAMLProducer() runtime.Producer { + return runtime.ProducerFunc(func(w io.Writer, v interface{}) error { + enc := yaml.NewEncoder(w) + defer enc.Close() + return enc.Encode(v) + }) +} diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index be4899cb12..22f8d21cca 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -4,56 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 31 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: - enable: - - revive - - goimports - - gosec + enable-all: true + disable: + - maligned - unparam - - unconvert - - predeclared - - prealloc - - misspell - - # disable: - # - maligned - # - lll - # - gochecknoinits - # - gochecknoglobals - # - godox - # - gocognit - # - whitespace - # - wsl - # - funlen - # - wrapcheck - # - testpackage - # - nlreturn - # - gofumpt - # - goerr113 - # - gci - # - gomnd - # - godot - # - exhaustivestruct - # - paralleltest - # - varnamelen - # - ireturn - # - exhaustruct - # #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index 0cf89d7766..f6b39c6c56 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,8 +1,7 @@ -# Strfmt [![Build 
Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - +# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) This package exposes a registry of data types to support string formats in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index a8a3604a2c..cfa9a526fe 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool { // ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) // // swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck // NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive +func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck oid, err := bsonprim.ObjectIDFromHex(hex) if err != nil { panic(err) @@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error { // BSON document if the error is nil. 
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil + return bson.TypeObjectID, oid[:], nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ad3b3c355b..ce99ce521b 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -94,7 +94,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { } // MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) { if from.Kind() != reflect.String { return obj, nil diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 9bef4c3b33..f08ba4da5d 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -76,6 +76,8 @@ const ( ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) @@ -84,7 +86,7 @@ var ( rxDateTime = regexp.MustCompile(DateTimePattern) // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis @@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i64)) - return bsontype.DateTime, buf, nil + return bson.TypeDateTime, buf, nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a @@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bsontype.Null { + if tpe == bson.TypeNull { *t = DateTime{} return nil } diff --git a/vendor/github.com/hashicorp/consul/api/.copywrite.hcl b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl new file mode 100644 index 0000000000..34d99ba25e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2023 + + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE index c72625e4cc..7c5baa45e1 100644 --- a/vendor/github.com/hashicorp/consul/api/LICENSE +++ b/vendor/github.com/hashicorp/consul/api/LICENSE @@ -1,92 +1,92 @@ -Copyright (c) 2013 HashiCorp, Inc. +Copyright (c) 2020 HashiCorp, Inc. Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. "Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. “Patent Claims” of a Contributor +1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -1.12. “Secondary License” +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -102,57 +102,59 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. 
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -165,11 +167,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -181,39 +184,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. 
You may include additional disclaimers of warranty and limitations of liability specific to any @@ -222,14 +226,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. Termination @@ -237,21 +241,22 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
+ infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -260,16 +265,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. Limitation of Liability @@ -281,27 +286,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. 
+ Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -315,23 +322,24 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. Exhibit A - Source Code Form License Notice @@ -342,15 +350,16 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
+If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 48d2e66ee9..47b38eb6ca 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -19,6 +19,13 @@ const ( // ACLManagementType is the management type token ACLManagementType = "management" + + // ACLTemplatedPolicy names + ACLTemplatedPolicyServiceName = "builtin/service" + ACLTemplatedPolicyNodeName = "builtin/node" + ACLTemplatedPolicyDNSName = "builtin/dns" + ACLTemplatedPolicyNomadServerName = "builtin/nomad-server" + ACLTemplatedPolicyWorkloadIdentityName = "builtin/workload-identity" ) type ACLLink struct { @@ -40,6 +47,7 @@ type ACLToken struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Local bool AuthMethod string `json:",omitempty"` ExpirationTTL time.Duration `json:",omitempty"` @@ -88,6 +96,7 @@ type ACLTokenListEntry struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Local bool AuthMethod string `json:",omitempty"` ExpirationTime *time.Time `json:",omitempty"` @@ -148,6 +157,27 @@ type ACLNodeIdentity struct { Datacenter string } +// ACLTemplatedPolicy represents a template used to generate a `synthetic` policy +// given some input variables. +type ACLTemplatedPolicy struct { + TemplateName string + TemplateVariables *ACLTemplatedPolicyVariables `json:",omitempty"` + + // Datacenters are an artifact of NodeIdentity & ServiceIdentity. + // It is used to facilitate the future migration away from both. + Datacenters []string `json:",omitempty"` +} + +type ACLTemplatedPolicyResponse struct { + TemplateName string + Schema string + Template string +} + +type ACLTemplatedPolicyVariables struct { + Name string +} + // ACLPolicy represents an ACL Policy. type ACLPolicy struct { ID string @@ -196,6 +226,7 @@ type ACLRole struct { Policies []*ACLRolePolicyLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Hash []byte CreateIndex uint64 ModifyIndex uint64 @@ -218,6 +249,12 @@ const ( // BindingRuleBindTypeRole binds to pre-existing roles with the given name. BindingRuleBindTypeRole BindingRuleBindType = "role" + + // BindingRuleBindTypeNode binds to a node identity with the given name. + BindingRuleBindTypeNode BindingRuleBindType = "node" + + // BindingRuleBindTypeTemplatedPolicy binds to a templated policy with the given template name and variables.
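+ // For example (an illustrative sketch, not upstream documentation): a + // binding rule with BindName set to ACLTemplatedPolicyServiceName + // ("builtin/service") and BindVars set to + // &ACLTemplatedPolicyVariables{Name: "api"} would have the auth method + // attach the built-in service policy for "api" to the tokens it issues; + // TemplatedPolicyPreview (further below) renders the policy such a + // template would generate.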
+ BindingRuleBindTypeTemplatedPolicy BindingRuleBindType = "templated-policy" ) type ACLBindingRule struct { @@ -227,6 +264,7 @@ type ACLBindingRule struct { Selector string BindType BindingRuleBindType BindName string + BindVars *ACLTemplatedPolicyVariables `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -1623,3 +1661,78 @@ func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLTo } return &out, wm, nil } + +// TemplatedPolicyReadByName retrieves the templated policy details (by name). Returns nil if not found. +func (a *ACL) TemplatedPolicyReadByName(templateName string, q *QueryOptions) (*ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policy/name/"+templateName) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLTemplatedPolicyResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TemplatedPolicyList retrieves a listing of all templated policies. +func (a *ACL) TemplatedPolicyList(q *QueryOptions) (map[string]ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policies") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries map[string]ACLTemplatedPolicyResponse + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TemplatedPolicyPreview is used to preview the policy rendered by the templated policy. +func (a *ACL) TemplatedPolicyPreview(tp *ACLTemplatedPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/templated-policy/preview/"+tp.TemplateName) + r.setWriteOptions(q) + r.obj = tp.TemplateVariables + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 6775edf425..24e2c50d64 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -307,6 +307,10 @@ type ServiceRegisterOpts struct { // having to manually deregister checks. ReplaceExistingChecks bool + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + // ctx is an optional context pass through to the underlying HTTP // request layer. Use WithContext() to set the context. 
ctx context.Context @@ -835,6 +839,9 @@ func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceR if opts.ReplaceExistingChecks { r.params.Set("replace-existing-checks", "true") } + if opts.Token != "" { + r.header.Set("X-Consul-Token", opts.Token) + } _, resp, err := a.c.doRequest(r) if err != nil { return err @@ -991,7 +998,14 @@ func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) e // CheckRegister is used to register a new check with // the local agent func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + return a.CheckRegisterOpts(check, nil) +} + +// CheckRegisterOpts is used to register a new check with +// the local agent using query options +func (a *Agent) CheckRegisterOpts(check *AgentCheckRegistration, q *QueryOptions) error { r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.setQueryOptions(q) r.obj = check _, resp, err := a.c.doRequest(r) if err != nil { @@ -1379,6 +1393,10 @@ func (a *Agent) UpdateConfigFileRegistrationToken(token string, q *WriteOptions) return a.updateToken("config_file_service_registration", token, q) } +func (a *Agent) UpdateDNSToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("dns", token, q) +} + // updateToken can be used to update one of an agent's ACL tokens after the agent has // started. The tokens may not be persisted, so will need to be updated again if // the agent is restarted unless the agent is configured to persist them. diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index 405e92ef27..b59c20fd30 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -39,11 +39,12 @@ const ( ) const ( - BuiltinAWSLambdaExtension string = "builtin/aws/lambda" - BuiltinExtAuthzExtension string = "builtin/ext-authz" - BuiltinLuaExtension string = "builtin/lua" - BuiltinPropertyOverrideExtension string = "builtin/property-override" - BuiltinWasmExtension string = "builtin/wasm" + BuiltinAWSLambdaExtension string = "builtin/aws/lambda" + BuiltinExtAuthzExtension string = "builtin/ext-authz" + BuiltinLuaExtension string = "builtin/lua" + BuiltinOTELAccessLoggingExtension string = "builtin/otel-access-logging" + BuiltinPropertyOverrideExtension string = "builtin/property-override" + BuiltinWasmExtension string = "builtin/wasm" // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured // extension type, as it is only used indirectly via troubleshooting tools. It is included here // for common reference alongside other builtin extensions. @@ -314,6 +315,47 @@ type UpstreamLimits struct { MaxConcurrentRequests *int `alias:"max_concurrent_requests"` } +// RateLimits is rate limiting configuration that is applied to +// inbound traffic for a service. +// Rate limiting is a Consul enterprise feature. +type RateLimits struct { + InstanceLevel InstanceLevelRateLimits `alias:"instance_level"` +} + +// InstanceLevelRateLimits represents rate limit configuration +// that is applied per service instance. +type InstanceLevelRateLimits struct { + // RequestsPerSecond is the average number of requests per second that can be + // made without being throttled. This field is required if RequestsMaxBurst + // is set. The allowed number of requests may exceed RequestsPerSecond up to + // the value specified in RequestsMaxBurst.
+ // + // Internally, this is the refill rate of the token bucket used for rate limiting. + RequestsPerSecond int `alias:"requests_per_second"` + + // RequestsMaxBurst is the maximum number of requests that can be sent + // in a burst. Should be equal to or greater than RequestsPerSecond. + // If unset, defaults to RequestsPerSecond. + // + // Internally, this is the maximum size of the token bucket used for rate limiting. + RequestsMaxBurst int `alias:"requests_max_burst"` + + // Routes is a list of rate limits applied to specific routes. + // Overrides any top-level configuration. + Routes []InstanceLevelRouteRateLimits +} + +// InstanceLevelRouteRateLimits represents rate limit configuration +// applied to a route matching one of PathExact/PathPrefix/PathRegex. +type InstanceLevelRouteRateLimits struct { + PathExact string `alias:"path_exact"` + PathPrefix string `alias:"path_prefix"` + PathRegex string `alias:"path_regex"` + + RequestsPerSecond int `alias:"requests_per_second"` + RequestsMaxBurst int `alias:"requests_max_burst"` +} + type ServiceConfigEntry struct { Kind string Name string @@ -332,6 +374,7 @@ type ServiceConfigEntry struct { LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` + RateLimits *RateLimits `json:",omitempty" alias:"rate_limits"` EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` Meta map[string]string `json:",omitempty"` CreateIndex uint64 diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index 3696f7be55..eeb3a1074c 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -6,6 +6,8 @@ package api import ( "encoding/json" "time" + + "github.com/hashicorp/go-multierror" ) type ServiceRouterConfigEntry struct { @@ -189,14 +191,19 @@ func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) { type Alias ServiceResolverConfigEntry exported := &struct { ConnectTimeout string `json:",omitempty"` + RequestTimeout string `json:",omitempty"` *Alias }{ ConnectTimeout: e.ConnectTimeout.String(), + RequestTimeout: e.RequestTimeout.String(), Alias: (*Alias)(e), } if e.ConnectTimeout == 0 { exported.ConnectTimeout = "" } + if e.RequestTimeout == 0 { + exported.RequestTimeout = "" + } return json.Marshal(exported) } @@ -205,20 +212,27 @@ func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error { type Alias ServiceResolverConfigEntry aux := &struct { ConnectTimeout string + RequestTimeout string *Alias }{ Alias: (*Alias)(e), } - if err := json.Unmarshal(data, &aux); err != nil { + var err error + if err = json.Unmarshal(data, &aux); err != nil { return err } - var err error + var merr *multierror.Error if aux.ConnectTimeout != "" { if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { - return err + merr = multierror.Append(merr, err) } } - return nil + if aux.RequestTimeout != "" { + if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil { + merr = multierror.Append(merr, err) + } + } + return merr.ErrorOrNil() } func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go 
b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index b59f1c0621..baf274e2da 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -284,6 +284,10 @@ type APIGatewayListener struct { Protocol string // TLS is the TLS settings for the listener. TLS APIGatewayTLSConfiguration + // Override is the policy that overrides all other policy and route specific configuration + Override *APIGatewayPolicy `json:",omitempty"` + // Default is the policy that is the default for the listener and route, routes can override this behavior + Default *APIGatewayPolicy `json:",omitempty"` } // APIGatewayTLSConfiguration specifies the configuration of a listener’s @@ -302,3 +306,39 @@ type APIGatewayTLSConfiguration struct { // Only applicable to connections negotiated via TLS 1.2 or earlier CipherSuites []string `json:",omitempty" alias:"cipher_suites"` } + +// APIGatewayPolicy holds the policy that configures the gateway listener, this is used in the `Override` and `Default` fields of a listener +type APIGatewayPolicy struct { + // JWT holds the JWT configuration for the Listener + JWT *APIGatewayJWTRequirement `json:",omitempty"` +} + +// APIGatewayJWTRequirement holds the list of JWT providers to be verified against +type APIGatewayJWTRequirement struct { + // Providers is a list of providers to consider when verifying a JWT. + Providers []*APIGatewayJWTProvider `json:",omitempty"` +} + +// APIGatewayJWTProvider holds the provider and claim verification information +type APIGatewayJWTProvider struct { + // Name is the name of the JWT provider. There MUST be a corresponding + // "jwt-provider" config entry with this name. + Name string `json:",omitempty"` + + // VerifyClaims is a list of additional claims to verify in a JWT's payload. + VerifyClaims []*APIGatewayJWTClaimVerification `json:",omitempty" alias:"verify_claims"` +} + +// APIGatewayJWTClaimVerification holds the actual claim information to be verified +type APIGatewayJWTClaimVerification struct { + // Path is the path to the claim in the token JSON. + Path []string `json:",omitempty"` + + // Value is the expected value at the given path: + // - If the type at the path is a list then we verify + // that this value is contained in the list. + // + // - If the type at the path is a string then we verify + // that this value matches. 
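+ // + // Illustrative example (an assumption about the matching semantics, not + // upstream documentation): with Path ["perms", "role"] and Value "admin", + // a JWT whose payload contains {"perms": {"role": "admin"}} passes, as + // does one whose entry at that path is a list containing "admin".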
+ Value string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go index 8df7d4c98e..7af2a2658f 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go @@ -4,8 +4,8 @@ package api type ReadWriteRatesConfig struct { - ReadRate float64 - WriteRate float64 + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` } type RateLimitIPConfigEntry struct { @@ -16,8 +16,8 @@ type RateLimitIPConfigEntry struct { Meta map[string]string `json:",omitempty"` // overall limits - ReadRate float64 - WriteRate float64 + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` //limits specific to a type of call ACL *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryACL OperationCategory = "ACL" diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go index cfea394535..bbaa032d50 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go @@ -3,6 +3,8 @@ package api +import "time" + // TCPRouteConfigEntry -- TODO stub type TCPRouteConfigEntry struct { // Kind of the config entry. This should be set to api.TCPRoute. @@ -195,8 +197,17 @@ type HTTPQueryMatch struct { // HTTPFilters specifies a list of filters used to modify a request // before it is routed to an upstream. type HTTPFilters struct { - Headers []HTTPHeaderFilter - URLRewrite *URLRewrite + Headers []HTTPHeaderFilter + URLRewrite *URLRewrite + RetryFilter *RetryFilter + TimeoutFilter *TimeoutFilter + JWT *JWTFilter +} + +// HTTPResponseFilters specifies a list of filters used to modify a +// response returned by an upstream +type HTTPResponseFilters struct { + Headers []HTTPHeaderFilter } // HTTPHeaderFilter specifies how HTTP headers should be modified. @@ -210,12 +221,32 @@ type URLRewrite struct { Path string } +type RetryFilter struct { + NumRetries uint32 + RetryOn []string + RetryOnStatusCodes []uint32 + RetryOnConnectFailure bool +} + +type TimeoutFilter struct { + RequestTimeout time.Duration + IdleTimeout time.Duration +} + +// JWTFilter specifies the JWT configuration for a route +type JWTFilter struct { + Providers []*APIGatewayJWTProvider `json:",omitempty"` +} + // HTTPRouteRule specifies the routing rules used to determine what upstream // service an HTTP request is routed to. type HTTPRouteRule struct { // Filters is a list of HTTP-based filters used to modify a request prior // to routing it to the upstream service Filters HTTPFilters + // ResponseFilters is a list of HTTP-based filters used to modify a response + // returned by the upstream service + ResponseFilters HTTPResponseFilters // Matches specified the matching criteria used in the routing table. If a // request matches the given HTTPMatch configuration, then traffic is routed // to services specified in the Services field. @@ -231,10 +262,15 @@ type HTTPService struct { // Weight is an arbitrary integer used in calculating how much // traffic should be sent to the given service. 
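+ // (Illustrative: with two HTTPService entries weighted 90 and 10, roughly + // 90% of matching requests would be routed to the first service.)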
Weight int + // Filters is a list of HTTP-based filters used to modify a request prior // to routing it to the upstream service Filters HTTPFilters + // ResponseFilters is a list of HTTP-based filters used to modify the + // response returned from the upstream service + ResponseFilters HTTPResponseFilters + // Partition is the partition the config entry is associated with. // Partitioning is a Consul Enterprise feature. Partition string `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_status.go b/vendor/github.com/hashicorp/consul/api/config_entry_status.go index 2d16ea0fc4..997066f24f 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_status.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_status.go @@ -106,6 +106,10 @@ const ( // certificates and cannot bind to any routes GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // JWT providers and cannot bind to any routes + GatewayReasonInvalidJWTProviders GatewayConditionReason = "InvalidJWTProviders" + // This condition indicates that the gateway was unable to resolve // conflicting specification requirements for this Listener. If a // Listener is conflicted, its network port should not be configured @@ -163,6 +167,14 @@ const ( // If the reference is not allowed, the reason RefNotPermitted must be used // instead. GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a JWT configuration with at least one JWTProvider + // that is invalid or does not exist. + // A JWTProvider is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. 
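+ // (Illustrative: a listener whose JWT configuration names a provider with + // no matching jwt-provider config entry would surface this reason.)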
+ GatewayListenerReasonInvalidJWTProviderRef GatewayConditionReason = "InvalidJWTProviderRef" ) var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{ @@ -172,6 +184,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayReasonInvalidCertificates, + GatewayReasonInvalidJWTProviders, }, ConditionStatusUnknown: {}, }, @@ -190,6 +203,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayListenerReasonInvalidCertificateRef, + GatewayListenerReasonInvalidJWTProviderRef, }, ConditionStatusUnknown: {}, }, @@ -282,6 +296,10 @@ const ( // This reason is used with the "Bound" condition when the route fails // to find the gateway RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound" + + // This reason is used with the "Accepted" condition when the route references non-existent + // JWTProviders + RouteReasonJWTProvidersNotFound RouteConditionReason = "JWTProvidersNotFound" ) var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{ @@ -302,6 +320,7 @@ var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStat ConditionStatusFalse: { RouteReasonGatewayNotFound, RouteReasonFailedToBind, + RouteReasonJWTProvidersNotFound, }, ConditionStatusUnknown: {}, }, diff --git a/vendor/github.com/hashicorp/consul/api/internal.go b/vendor/github.com/hashicorp/consul/api/internal.go index dee161a65e..b5f400f4b1 100644 --- a/vendor/github.com/hashicorp/consul/api/internal.go +++ b/vendor/github.com/hashicorp/consul/api/internal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "context" diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index d72c00c97b..f0f5794aa5 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -68,9 +68,14 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e } // RaftLeaderTransfer is used to transfer the current raft leader to another node -func (op *Operator) RaftLeaderTransfer(q *QueryOptions) (*TransferLeaderResponse, error) { +// Optionally accepts a non-empty id of another node to transfer leadership to. 
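+// If id is empty, the "id" query parameter is omitted and the server chooses +// the transfer target (a reading of the code below, not upstream documentation).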
+func (op *Operator) RaftLeaderTransfer(id string, q *QueryOptions) (*TransferLeaderResponse, error) { r := op.c.newRequest("POST", "/v1/operator/raft/transfer-leader") r.setQueryOptions(q) + + if id != "" { + r.params.Set("id", id) + } _, resp, err := op.c.doRequest(r) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 0000000000..5f16dd140c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,45 @@ +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 0000000000..4d25050903 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 0000000000..da5d1aca14 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,296 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
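+// For example, NewConstraint(">= 1.0, < 1.4") returns a Constraints value +// holding two constraints that must both hold for Check to return true.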
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comparison. +func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + +// String returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +// Prerelease returns true if the version underlying this constraint +// contains a prerelease field.
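+// For example, the constraint ">= 1.0.0-beta1" contains a prerelease field, +// while ">= 1.0.0" does not.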
+func (c *Constraint) Prerelease() bool { + return len(c.check.Prerelease()) > 0 +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + cop := constraintOperators[matches[1]] + + return &Constraint{ + f: cop.f, + op: cop.op, + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. + return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. 
If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 0000000000..e87df69906 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,407 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = val + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: len(segmentsStr), + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
+func (v *Version) Compare(other *Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+
+	// If the segments are the same, we must compare on prerelease info
+	if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+		preSelf := v.Prerelease()
+		preOther := other.Prerelease()
+		if preSelf == "" && preOther == "" {
+			return 0
+		}
+		if preSelf == "" {
+			return 1
+		}
+		if preOther == "" {
+			return -1
+		}
+
+		return comparePrereleases(preSelf, preOther)
+	}
+
+	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+	lenSelf := len(segmentsSelf)
+	lenOther := len(segmentsOther)
+	hS := lenSelf
+	if lenSelf < lenOther {
+		hS = lenOther
+	}
+	// Compare the segments
+	// Because a constraint could have more/less specificity than the version it's
+	// checking, we need to account for a lopsided or jagged comparison
+	for i := 0; i < hS; i++ {
+		if i > lenSelf-1 {
+			// This means Self had the lower specificity
+			// Check to see if the remaining segments in Other are all zeros
+			if !allZero(segmentsOther[i:]) {
+				// if not, it means that Other has to be greater than Self
+				return -1
+			}
+			break
+		} else if i > lenOther-1 {
+			// this means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros -
+			if !allZero(segmentsSelf[i:]) {
+				// if not, it means that Self has to be greater than Other
+				return 1
+			}
+			break
+		}
+		lhs := segmentsSelf[i]
+		rhs := segmentsOther[i]
+		if lhs == rhs {
+			continue
+		} else if lhs < rhs {
+			return -1
+		}
+		// Otherwise, rhs was > lhs, they're not equal
+		return 1
+	}
+
+	// if we got this far, they're equal
+	return 0
+}
+
+func allZero(segs []int64) bool {
+	for _, s := range segs {
+		if s != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+	if preSelf == preOther {
+		return 0
+	}
+
+	var selfInt int64
+	selfNumeric := true
+	selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	var otherInt int64
+	otherNumeric := true
+	otherInt, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
+	// if a part is empty, we use the other to decide
+	if preSelf == "" {
+		if otherNumeric {
+			return -1
+		}
+		return 1
+	}
+
+	if preOther == "" {
+		if selfNumeric {
+			return 1
+		}
+		return -1
+	}
+
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if !selfNumeric && !otherNumeric && preSelf > preOther {
+		return 1
+	} else if selfInt > otherInt {
+		return 1
+	}
+
+	return -1
+}
+
+func comparePrereleases(v string, other string) int {
+	// the same pre release!
+	if v == other {
+		return 0
+	}
+
+	// split both pre releases to analyze their parts
+	selfPreReleaseMeta := strings.Split(v, ".")
+	otherPreReleaseMeta := strings.Split(other, ".")
+
+	selfPreReleaseLen := len(selfPreReleaseMeta)
+	otherPreReleaseLen := len(otherPreReleaseMeta)
+
+	biggestLen := otherPreReleaseLen
+	if selfPreReleaseLen > otherPreReleaseLen {
+		biggestLen = selfPreReleaseLen
+	}
+
+	// loop over the parts to find the first difference
+	for i := 0; i < biggestLen; i = i + 1 {
+		partSelfPre := ""
+		if i < selfPreReleaseLen {
+			partSelfPre = selfPreReleaseMeta[i]
+		}
+
+		partOtherPre := ""
+		if i < otherPreReleaseLen {
+			partOtherPre = otherPreReleaseMeta[i]
+		}
+
+		compare := comparePart(partSelfPre, partOtherPre)
+		// if parts are equal, continue the loop
+		if compare != 0 {
+			return compare
+		}
+	}
+
+	return 0
+}
+
+// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
+// segments of the version, without prerelease or metadata.
+func (v *Version) Core() *Version {
+	segments := v.Segments64()
+	segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
+	return Must(NewVersion(segmentsOnly))
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+	if v == nil || o == nil {
+		return v == o
+	}
+
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+	return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+	return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+	segmentSlice := make([]int, len(v.segments))
+	for i, v := range v.segments {
+		segmentSlice[i] = int(v)
+	}
+	return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+	result := make([]int64, len(v.segments))
+	copy(result, v.segments)
+	return result
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 0000000000..cc888d43e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/klauspost/compress/gzhttp/transport.go b/vendor/github.com/klauspost/compress/gzhttp/transport.go index a199fbc6e8..623aea2ed8 100644 --- a/vendor/github.com/klauspost/compress/gzhttp/transport.go +++ b/vendor/github.com/klauspost/compress/gzhttp/transport.go @@ -14,7 +14,7 @@ import ( "github.com/klauspost/compress/zstd" ) -// Transport will wrap a transport with a custom handler +// Transport will wrap an HTTP transport with a custom handler // that will request gzip and automatically decompress it. // Using this is significantly faster than using the default transport. func Transport(parent http.RoundTripper, opts ...transportOption) http.RoundTripper { @@ -51,10 +51,21 @@ func TransportEnableGzip(b bool) transportOption { } } +// TransportCustomEval will send the header of a response to a custom function. +// If the function returns false, the response will be returned as-is, +// Otherwise it will be decompressed based on Content-Encoding field, regardless +// of whether the transport added the encoding. 
+func TransportCustomEval(fn func(header http.Header) bool) transportOption { + return func(c *gzRoundtripper) { + c.customEval = fn + } +} + type gzRoundtripper struct { parent http.RoundTripper acceptEncoding string withZstd, withGzip bool + customEval func(header http.Header) bool } func (g *gzRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -82,16 +93,22 @@ func (g *gzRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil || !requestedComp { return resp, err } - + decompress := false + if g.customEval != nil { + if !g.customEval(resp.Header) { + return resp, nil + } + decompress = true + } // Decompress - if g.withGzip && asciiEqualFold(resp.Header.Get("Content-Encoding"), "gzip") { + if (decompress || g.withGzip) && asciiEqualFold(resp.Header.Get("Content-Encoding"), "gzip") { resp.Body = &gzipReader{body: resp.Body} resp.Header.Del("Content-Encoding") resp.Header.Del("Content-Length") resp.ContentLength = -1 resp.Uncompressed = true } - if g.withZstd && asciiEqualFold(resp.Header.Get("Content-Encoding"), "zstd") { + if (decompress || g.withZstd) && asciiEqualFold(resp.Header.Get("Content-Encoding"), "zstd") { resp.Body = &zstdReader{body: resp.Body} resp.Header.Del("Content-Encoding") resp.Header.Del("Content-Length") diff --git a/vendor/github.com/klauspost/compress/gzhttp/writer/gzkp/gzkp.go b/vendor/github.com/klauspost/compress/gzhttp/writer/gzkp/gzkp.go index e31c46c4cf..1930af12e7 100644 --- a/vendor/github.com/klauspost/compress/gzhttp/writer/gzkp/gzkp.go +++ b/vendor/github.com/klauspost/compress/gzhttp/writer/gzkp/gzkp.go @@ -24,6 +24,12 @@ func init() { // poolIndex maps a compression level to its index into gzipWriterPools. It // assumes that level is a valid gzip compression level. func poolIndex(level int) int { + if level > gzip.BestCompression { + level = gzip.BestCompression + } + if level < gzip.StatelessCompression { + level = gzip.BestSpeed + } return level - gzip.StatelessCompression } diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go index dc2362a63b..00a0a2c386 100644 --- a/vendor/github.com/klauspost/compress/gzip/gunzip.go +++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go @@ -238,6 +238,11 @@ func (z *Reader) readHeader() (hdr Header, err error) { } } + // Reserved FLG bits must be zero. + if flg>>5 != 0 { + return hdr, ErrHeader + } + z.digest = 0 if z.decompressor == nil { z.decompressor = flate.NewReader(z.r) diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d232..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 518436cf3d..84aa3d12f0 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Does not update s.clearCount. func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. for _, v := range in { s.count[v]++ } @@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool { // minTableLog provides the minimum logSize to safely represent a distribution. func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index e8ad17ad08..77ecd68e0a 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -88,7 +88,7 @@ type Scratch struct { // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. MaxDecodedSize int - br byteReader + srcLen int // MaxSymbolValue will override the maximum symbol value of the next block. MaxSymbolValue uint8 @@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.fse == nil { s.fse = &fse.Scratch{} } - s.br.init(in) + s.srcLen = len(in) return s, nil } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index bdd49c8b25..92e2347bbc 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ## Decompressor -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
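Before moving on to the newly vendored licenses, it is worth seeing how the hashicorp/go-version package added earlier in this patch fits together at its public entry points: NewConstraint/Check for constraint matching and NewVersion for parsing. The following is a minimal sketch using only the API visible in this diff; the version strings and constraint expressions are illustrative, not taken from Cortex code.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// ">= 1.2, < 2.0" parses into two constraints; Check requires all to match.
	cs := version.MustConstraints(version.NewConstraint(">= 1.2, < 2.0"))

	for _, raw := range []string{"1.4.5", "2.1.0", "1.3.0-beta1"} {
		v := version.Must(version.NewVersion(raw))
		// "1.3.0-beta1" fails: a constraint without a prerelease never
		// matches a prerelease version (see prereleaseCheck above).
		fmt.Printf("%s satisfies %s: %t\n", v, cs, cs.Check(v))
	}

	// The pessimistic operator "~>" pins every given segment but the last:
	// "~> 1.2.3" admits 1.2.x for x >= 3, but rejects 1.3.0.
	pess := version.MustConstraints(version.NewConstraint("~> 1.2.3"))
	fmt.Println(pess.Check(version.Must(version.NewVersion("1.2.9")))) // true
	fmt.Println(pess.Check(version.Must(version.NewVersion("1.3.0")))) // false
}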
diff --git a/vendor/github.com/metalmatze/signal/LICENSE b/vendor/github.com/metalmatze/signal/LICENSE new file mode 100644 index 0000000000..e7c9c2291a --- /dev/null +++ b/vendor/github.com/metalmatze/signal/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Signal Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/metalmatze/signal/server/signalhttp/instrumentation.go b/vendor/github.com/metalmatze/signal/server/signalhttp/instrumentation.go new file mode 100644 index 0000000000..397e181caa --- /dev/null +++ b/vendor/github.com/metalmatze/signal/server/signalhttp/instrumentation.go @@ -0,0 +1,96 @@ +// Copyright 2021 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signalhttp + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type instrumentationMiddleware struct { + requestCounter *prometheus.CounterVec + requestSize *prometheus.SummaryVec + requestDuration *prometheus.HistogramVec + responseSize *prometheus.HistogramVec +} + +// HandlerInstrumenter can instrument handlers. +type HandlerInstrumenter interface { + NewHandler(labels prometheus.Labels, handler http.Handler) http.HandlerFunc +} + +// NewHandlerInstrumenter creates a new middleware that observes some metrics for HTTP handlers. +func NewHandlerInstrumenter(r prometheus.Registerer, extraLabels []string) HandlerInstrumenter { + labels := append([]string{"code", "method"}, extraLabels...) + + ins := &instrumentationMiddleware{ + requestCounter: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "Counter of HTTP requests.", + }, + labels, + ), + requestSize: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "http_request_size_bytes", + Help: "Size of HTTP requests.", + }, + labels, + ), + requestDuration: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_duration_seconds", + Help: "Histogram of latencies for HTTP requests.", + Buckets: []float64{.1, .2, .4, 1, 2.5, 5, 8, 20, 60, 120}, + }, + labels, + ), + responseSize: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_response_size_bytes", + Help: "Histogram of response size for HTTP requests.", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + labels, + ), + } + + if r != nil { + r.MustRegister( + ins.requestCounter, + ins.requestSize, + ins.requestDuration, + ins.responseSize, + ) + } + + return ins +} + +// NewHandler wraps a HTTP handler with some metrics for HTTP handlers. 
+func (ins *instrumentationMiddleware) NewHandler(labels prometheus.Labels, handler http.Handler) http.HandlerFunc { + return promhttp.InstrumentHandlerCounter(ins.requestCounter.MustCurryWith(labels), + promhttp.InstrumentHandlerRequestSize(ins.requestSize.MustCurryWith(labels), + promhttp.InstrumentHandlerDuration(ins.requestDuration.MustCurryWith(labels), + promhttp.InstrumentHandlerResponseSize(ins.responseSize.MustCurryWith(labels), + handler, + ), + ), + ), + ) +} diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go index ab2812e334..1a59a854ec 100644 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -10,8 +10,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction // // * opcode isn't OpcodeQuery or OpcodeNotify // -// * Zero bit isn't zero -// // * does not have exactly 1 question in the question section // // * has more than 1 RR in the Answer section diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 6d7e176054..02d9199a49 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -5,7 +5,6 @@ import ( "net" "strconv" "strings" - "unicode" ) const hexDigit = "0123456789abcdef" @@ -23,8 +22,7 @@ func (dns *Msg) SetReply(request *Msg) *Msg { } dns.Rcode = RcodeSuccess if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] + dns.Question = []Question{request.Question[0]} } return dns } @@ -293,26 +291,19 @@ func IsFqdn(s string) bool { return (len(s)-i)%2 != 0 } -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. Returns true -// if the RR set is valid, otherwise false. +// IsRRset reports whether a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. func IsRRset(rrset []RR) bool { if len(rrset) == 0 { return false } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name + baseH := rrset[0].Header() for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + curH := rr.Header() + if curH.Rrtype != baseH.Rrtype || curH.Class != baseH.Class || curH.Name != baseH.Name { // Mismatch between the records, so this is not a valid rrset for - //signing/verifying + // signing/verifying return false } } @@ -330,19 +321,15 @@ func Fqdn(s string) string { } // CanonicalName returns the domain name in canonical form. A name in canonical -// form is lowercase and fully qualified. See Section 6.2 in RFC 4034. -// According to the RFC all uppercase US-ASCII letters in the owner name of the -// RR areeplaced by the corresponding lowercase US-ASCII letters. +// form is lowercase and fully qualified. Only US-ASCII letters are affected. See +// Section 6.2 in RFC 4034. func CanonicalName(s string) string { - var result strings.Builder - for _, ch := range s { - if unicode.IsUpper(ch) && (ch >= 0x00 && ch <= 0x7F) { - result.WriteRune(unicode.ToLower(ch)) - } else { - result.WriteRune(ch) + return strings.Map(func(r rune) rune { + if r >= 'A' && r <= 'Z' { + r += 'a' - 'A' } - } - return Fqdn(result.String()) + return r + }, Fqdn(s)) } // Copied from the official Go code. 
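Stepping back to the signalhttp instrumentation middleware vendored above: its whole surface is NewHandlerInstrumenter plus NewHandler, with per-handler label values curried into shared metrics. A minimal sketch of wiring it into a server follows; the "handler" label, route, and port are illustrative assumptions, not anything prescribed by this patch.

package main

import (
	"net/http"

	"github.com/metalmatze/signal/server/signalhttp"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// "handler" is appended to the built-in "code" and "method" label names.
	ins := signalhttp.NewHandlerInstrumenter(reg, []string{"handler"})

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// Each wrapped handler curries its own label values; requests then feed
	// the shared http_requests_total, request size/duration, and response
	// size metrics registered above.
	http.Handle("/hello", ins.NewHandler(prometheus.Labels{"handler": "hello"}, hello))
	_ = http.ListenAndServe(":8080", nil)
}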
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index f79658169f..5e72249b52 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -37,7 +37,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er return nil, ErrPrivKey } // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + algoStr, _, _ := strings.Cut(m["algorithm"], " ") + algo, err := strconv.ParseUint(algoStr, 10, 8) if err != nil { return nil, ErrPrivKey } diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index b5bdac8160..1b58e8f0aa 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -185,7 +185,7 @@ func (rr *OPT) Do() bool { // SetDo sets the DO (DNSSEC OK) bit. // If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. +// It is possible to pass 2 or more arguments, but they will be ignored. func (rr *OPT) SetDo(do ...bool) { if len(do) == 1 { if do[0] { @@ -508,6 +508,7 @@ func (e *EDNS0_LLQ) String() string { " " + strconv.FormatUint(uint64(e.LeaseLife), 10) return s } + func (e *EDNS0_LLQ) copy() EDNS0 { return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} } diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go index ac8df34dd5..713e9d2dad 100644 --- a/vendor/github.com/miekg/dns/generate.go +++ b/vendor/github.com/miekg/dns/generate.go @@ -35,17 +35,17 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { token = token[:i] } - sx := strings.SplitN(token, "-", 2) - if len(sx) != 2 { + startStr, endStr, ok := strings.Cut(token, "-") + if !ok { return zp.setParseError("bad start-stop in $GENERATE range", l) } - start, err := strconv.ParseInt(sx[0], 10, 64) + start, err := strconv.ParseInt(startStr, 10, 64) if err != nil { return zp.setParseError("bad start in $GENERATE range", l) } - end, err := strconv.ParseInt(sx[1], 10, 64) + end, err := strconv.ParseInt(endStr, 10, 64) if err != nil { return zp.setParseError("bad stop in $GENERATE range", l) } @@ -54,7 +54,7 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { } // _BLANK - l, ok := zp.c.Next() + l, ok = zp.c.Next() if !ok || l.value != zBlank { return zp.setParseError("garbage after $GENERATE range", l) } @@ -211,15 +211,16 @@ func (r *generateReader) ReadByte() (byte, error) { func modToPrintf(s string) (string, int64, string) { // Modifier is { offset [ ,width [ ,base ] ] } - provide default // values for optional width and type, if necessary. 
- var offStr, widthStr, base string - switch xs := strings.Split(s, ","); len(xs) { - case 1: - offStr, widthStr, base = xs[0], "0", "d" - case 2: - offStr, widthStr, base = xs[0], xs[1], "d" - case 3: - offStr, widthStr, base = xs[0], xs[1], xs[2] - default: + offStr, s, ok0 := strings.Cut(s, ",") + widthStr, s, ok1 := strings.Cut(s, ",") + base, _, ok2 := strings.Cut(s, ",") + if !ok0 { + widthStr = "0" + } + if !ok1 { + base = "d" + } + if ok2 { return "", 0, "bad modifier in $GENERATE" } @@ -234,8 +235,8 @@ func modToPrintf(s string) (string, int64, string) { return "", 0, "bad offset in $GENERATE" } - width, err := strconv.ParseInt(widthStr, 10, 64) - if err != nil || width < 0 || width > 255 { + width, err := strconv.ParseUint(widthStr, 10, 8) + if err != nil { return "", 0, "bad width in $GENERATE" } diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go index 6ed50f86be..8cebb2f171 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go @@ -7,16 +7,18 @@ import "net" const supportsReusePort = false -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - if reuseport { +func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { + if reuseport || reuseaddr { // TODO(tmthrgd): return an error? } return net.Listen(network, addr) } -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - if reuseport { +const supportsReuseAddr = false + +func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { + if reuseport || reuseaddr { // TODO(tmthrgd): return an error? } diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go index 89bac90342..41326f20b7 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_reuseport.go @@ -25,19 +25,41 @@ func reuseportControl(network, address string, c syscall.RawConn) error { return opErr } -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { +const supportsReuseAddr = true + +func reuseaddrControl(network, address string, c syscall.RawConn) error { + var opErr error + err := c.Control(func(fd uintptr) { + opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) + if err != nil { + return err + } + + return opErr +} + +func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { var lc net.ListenConfig - if reuseport { + switch { + case reuseaddr && reuseport: + case reuseport: lc.Control = reuseportControl + case reuseaddr: + lc.Control = reuseaddrControl } return lc.Listen(context.Background(), network, addr) } -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { +func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { var lc net.ListenConfig - if reuseport { + switch { + case reuseaddr && reuseport: + case reuseport: lc.Control = reuseportControl + case reuseaddr: + lc.Control = reuseaddrControl } return lc.ListenPacket(context.Background(), network, addr) diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index b05cf14e9e..8294d03958 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -501,30 +501,28 @@ func packTxtString(s string, msg []byte, offset int) (int, error) { return offset, nil } -func packOctetString(s 
string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { +func packOctetString(s string, msg []byte, offset int) (int, error) { + if offset >= len(msg) || len(s) > 256*4+1 { return offset, ErrBuf } - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { + for i := 0; i < len(s); i++ { if len(msg) <= offset { return offset, ErrBuf } - if bs[i] == '\\' { + if s[i] == '\\' { i++ - if i == len(bs) { + if i == len(s) { break } // check for \DDD - if isDDD(bs[i:]) { - msg[offset] = dddToByte(bs[i:]) + if isDDD(s[i:]) { + msg[offset] = dddToByte(s[i:]) i += 2 } else { - msg[offset] = bs[i] + msg[offset] = s[i] } } else { - msg[offset] = bs[i] + msg[offset] = s[i] } offset++ } diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 8582fc0adb..acec21f7de 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -20,9 +20,7 @@ func unpackDataA(msg []byte, off int) (net.IP, int, error) { if off+net.IPv4len > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking a"} } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) - off += net.IPv4len - return a, off, nil + return cloneSlice(msg[off : off+net.IPv4len]), off + net.IPv4len, nil } func packDataA(a net.IP, msg []byte, off int) (int, error) { @@ -47,9 +45,7 @@ func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { if off+net.IPv6len > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking aaaa"} } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) - off += net.IPv6len - return aaaa, off, nil + return cloneSlice(msg[off : off+net.IPv6len]), off + net.IPv6len, nil } func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { @@ -410,29 +406,24 @@ func packStringTxt(s []string, msg []byte, off int) (int, error) { func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { var edns []EDNS0 -Option: - var code uint16 - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - e := makeDataOpt(code) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - - if off < len(msg) { - goto Option + for off < len(msg) { + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code := binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + opt := makeDataOpt(code) + if err := opt.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, opt) + off += int(optlen) } - return edns, off, nil } @@ -461,8 +452,7 @@ func unpackStringOctet(msg []byte, off int) (string, int, error) { } func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) + off, err := packOctetString(s, msg, off) if err != nil { return len(msg), err } diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 3083c3e5f3..062d8ff3a0 100644 --- a/vendor/github.com/miekg/dns/scan.go 
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -605,8 +605,6 @@ func (zp *ZoneParser) Next() (RR, bool) {
 
 			if !isPrivate && zp.c.Peek().token == "" {
 				// This is a dynamic update rr.
-				// TODO(tmthrgd): Previously slurpRemainder was only called
-				// for certain RR types, which may have been important.
 				if err := slurpRemainder(zp.c); err != nil {
 					return zp.setParseError(err.err, err.lex)
 				}
@@ -1216,42 +1214,34 @@ func stringToCm(token string) (e, m uint8, ok bool) {
 	if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
 		token = token[0 : len(token)-1]
 	}
-	s := strings.SplitN(token, ".", 2)
-	var meters, cmeters, val int
-	var err error
-	switch len(s) {
-	case 2:
-		if cmeters, err = strconv.Atoi(s[1]); err != nil {
-			return
-		}
+
+	var (
+		meters, cmeters, val int
+		err                  error
+	)
+	mStr, cmStr, hasCM := strings.Cut(token, ".")
+	if hasCM {
 		// There's no point in having more than 2 digits in this part, and would rather make the implementation complicated ('123' should be treated as '12').
 		// So we simply reject it.
 		// We also make sure the first character is a digit to reject '+-' signs.
-		if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' {
+		cmeters, err = strconv.Atoi(cmStr)
+		if err != nil || len(cmStr) > 2 || cmStr[0] < '0' || cmStr[0] > '9' {
 			return
 		}
-		if len(s[1]) == 1 {
+		if len(cmStr) == 1 {
 			// 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm.
 			cmeters *= 10
 		}
-		if s[0] == "" {
-			// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
-			break
-		}
-		fallthrough
-	case 1:
-		if meters, err = strconv.Atoi(s[0]); err != nil {
-			return
-		}
+	}
+	// This slightly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
+	if !hasCM || mStr != "" {
+		meters, err = strconv.Atoi(mStr)
 		// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
-		if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
+		if err != nil || mStr[0] < '0' || mStr[0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
			return
 		}
-	case 0:
-		// huh?
- return 0, 0, false } - ok = true + if meters > 0 { e = 2 val = meters @@ -1263,8 +1253,7 @@ func stringToCm(token string) (e, m uint8, ok bool) { e++ val /= 10 } - m = uint8(val) - return + return e, uint8(val), true } func toAbsoluteName(name, origin string) (absolute string, ok bool) { diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index d08c8e6a72..a635e1c5cb 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -1,7 +1,6 @@ package dns import ( - "bytes" "encoding/base64" "errors" "net" @@ -12,15 +11,15 @@ import ( // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // or an error func endingToString(c *zlexer, errstr string) (string, *ParseError) { - var buffer bytes.Buffer + var s strings.Builder l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { - return buffer.String(), &ParseError{"", errstr, l} + return s.String(), &ParseError{"", errstr, l} } switch l.value { case zString: - buffer.WriteString(l.token) + s.WriteString(l.token) case zBlank: // Ok default: return "", &ParseError{"", errstr, l} @@ -28,7 +27,7 @@ func endingToString(c *zlexer, errstr string) (string, *ParseError) { l, _ = c.Next() } - return buffer.String(), nil + return s.String(), nil } // A remainder of the rdata with embedded spaces, split on unquoted whitespace diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 64e3885462..0207d6da22 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -226,6 +226,10 @@ type Server struct { // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. // It is only supported on certain GOOSes and when using ListenAndServe. ReusePort bool + // Whether to set the SO_REUSEADDR socket option, allowing multiple listeners to be bound to a single address. + // Crucially this allows binding when an existing server is listening on `0.0.0.0` or `::`. + // It is only supported on certain GOOSes and when using ListenAndServe. + ReuseAddr bool // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. 
MsgAcceptFunc MsgAcceptFunc @@ -304,7 +308,7 @@ func (srv *Server) ListenAndServe() error { switch srv.Net { case "tcp", "tcp4", "tcp6": - l, err := listenTCP(srv.Net, addr, srv.ReusePort) + l, err := listenTCP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } @@ -317,7 +321,7 @@ func (srv *Server) ListenAndServe() error { return errors.New("dns: neither Certificates nor GetCertificate set in Config") } network := strings.TrimSuffix(srv.Net, "-tls") - l, err := listenTCP(network, addr, srv.ReusePort) + l, err := listenTCP(network, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } @@ -327,7 +331,7 @@ func (srv *Server) ListenAndServe() error { unlock() return srv.serveTCP(l) case "udp", "udp4", "udp6": - l, err := listenUDP(srv.Net, addr, srv.ReusePort) + l, err := listenUDP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index 6d496d74de..d38aa2f05c 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -314,10 +314,11 @@ func (s *SVCBMandatory) unpack(b []byte) error { } func (s *SVCBMandatory) parse(b string) error { - str := strings.Split(b, ",") - codes := make([]SVCBKey, 0, len(str)) - for _, e := range str { - codes = append(codes, svcbStringToKey(e)) + codes := make([]SVCBKey, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var key string + key, b, _ = strings.Cut(b, ",") + codes = append(codes, svcbStringToKey(key)) } s.Code = codes return nil @@ -613,19 +614,24 @@ func (s *SVCBIPv4Hint) String() string { } func (s *SVCBIPv4Hint) parse(b string) error { + if b == "" { + return errors.New("dns: svcbipv4hint: empty hint") + } if strings.Contains(b, ":") { return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6") } - str := strings.Split(b, ",") - dst := make([]net.IP, len(str)) - for i, e := range str { + + hint := make([]net.IP, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var e string + e, b, _ = strings.Cut(b, ",") ip := net.ParseIP(e).To4() if ip == nil { return errors.New("dns: svcbipv4hint: bad ip") } - dst[i] = ip + hint = append(hint, ip) } - s.Hint = dst + s.Hint = hint return nil } @@ -733,9 +739,14 @@ func (s *SVCBIPv6Hint) String() string { } func (s *SVCBIPv6Hint) parse(b string) error { - str := strings.Split(b, ",") - dst := make([]net.IP, len(str)) - for i, e := range str { + if b == "" { + return errors.New("dns: svcbipv6hint: empty hint") + } + + hint := make([]net.IP, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var e string + e, b, _ = strings.Cut(b, ",") ip := net.ParseIP(e) if ip == nil { return errors.New("dns: svcbipv6hint: bad ip") @@ -743,9 +754,9 @@ func (s *SVCBIPv6Hint) parse(b string) error { if ip.To4() != nil { return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6") } - dst[i] = ip + hint = append(hint, ip) } - s.Hint = dst + s.Hint = hint return nil } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index a091136629..9fd300f660 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 56} +var Version = v{1, 1, 57} // v holds the version of this library. 
type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 0a831c8805..05b3c5adde 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -80,8 +80,13 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { first := true - defer t.Close() - defer close(c) + defer func() { + // First close the connection, then the channel. This allows functions blocked on + // the channel to assume that the connection is closed and no further operations are + // pending when they resume. + t.Close() + close(c) + }() timeout := dnsTimeout if t.ReadTimeout != 0 { timeout = t.ReadTimeout @@ -131,8 +136,13 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { axfr := true n := 0 qser := q.Ns[0].(*SOA).Serial - defer t.Close() - defer close(c) + defer func() { + // First close the connection, then the channel. This allows functions blocked on + // the channel to assume that the connection is closed and no further operations are + // pending when they resume. + t.Close() + close(c) + }() timeout := dnsTimeout if t.ReadTimeout != 0 { timeout = t.ReadTimeout diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/LICENSE b/vendor/github.com/prometheus-community/prom-label-proxy/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/prometheus-community/prom-label-proxy/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/alerts.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/alerts.go new file mode 100644 index 0000000000..1eed4c1679 --- /dev/null +++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/alerts.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package injectproxy
+
+import "net/http"
+
+// alerts proxies HTTP requests to the Alertmanager /api/v2/alerts endpoint.
+func (r *routes) alerts(w http.ResponseWriter, req *http.Request) {
+	switch req.Method {
+	case "GET":
+		r.enforceFilterParameter(w, req)
+	default:
+		http.NotFound(w, req)
+	}
+}
diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/enforce.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/enforce.go
new file mode 100644
index 0000000000..8cc1f28acb
--- /dev/null
+++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/enforce.go
@@ -0,0 +1,168 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package injectproxy
+
+import (
+	"fmt"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+)
+
+type Enforcer struct {
+	labelMatchers  map[string]*labels.Matcher
+	errorOnReplace bool
+}
+
+func NewEnforcer(errorOnReplace bool, ms ...*labels.Matcher) *Enforcer {
+	entries := make(map[string]*labels.Matcher)
+
+	for _, matcher := range ms {
+		entries[matcher.Name] = matcher
+	}
+
+	return &Enforcer{
+		labelMatchers:  entries,
+		errorOnReplace: errorOnReplace,
+	}
+}
+
+type IllegalLabelMatcherError struct {
+	msg string
+}
+
+func (e IllegalLabelMatcherError) Error() string { return e.msg }
+
+func newIllegalLabelMatcherError(existing string, replacement string) IllegalLabelMatcherError {
+	return IllegalLabelMatcherError{
+		msg: fmt.Sprintf("label matcher value (%s) conflicts with injected value (%s)", existing, replacement),
+	}
+}
+
+// EnforceNode walks the given node recursively
+// and applies the enforcer's label matchers to it.
+//
+// Whenever a parser.MatrixSelector or parser.VectorSelector AST node is found,
+// its label matchers are potentially modified.
+// If a node's label matcher has the same name as a label matcher
+// of the given enforcer, it will be replaced.
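+//
+// As an illustrative sketch (not part of the upstream documentation, and
+// using a hypothetical "tenant" label): with an enforcer built from the
+// matcher tenant="team-a", the selector up{tenant="team-b"} is rewritten to
+// up{tenant="team-a"}; with errorOnReplace set, the same input yields an
+// IllegalLabelMatcherError instead.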
+func (ms Enforcer) EnforceNode(node parser.Node) error {
+	switch n := node.(type) {
+	case *parser.EvalStmt:
+		if err := ms.EnforceNode(n.Expr); err != nil {
+			return err
+		}
+
+	case parser.Expressions:
+		for _, e := range n {
+			if err := ms.EnforceNode(e); err != nil {
+				return err
+			}
+		}
+
+	case *parser.AggregateExpr:
+		if err := ms.EnforceNode(n.Expr); err != nil {
+			return err
+		}
+
+	case *parser.BinaryExpr:
+		if err := ms.EnforceNode(n.LHS); err != nil {
+			return err
+		}
+
+		if err := ms.EnforceNode(n.RHS); err != nil {
+			return err
+		}
+
+	case *parser.Call:
+		if err := ms.EnforceNode(n.Args); err != nil {
+			return err
+		}
+
+	case *parser.SubqueryExpr:
+		if err := ms.EnforceNode(n.Expr); err != nil {
+			return err
+		}
+
+	case *parser.ParenExpr:
+		if err := ms.EnforceNode(n.Expr); err != nil {
+			return err
+		}
+
+	case *parser.UnaryExpr:
+		if err := ms.EnforceNode(n.Expr); err != nil {
+			return err
+		}
+
+	case *parser.NumberLiteral, *parser.StringLiteral:
+		// nothing to do
+
+	case *parser.MatrixSelector:
+		// Inject the label selector.
+		if vs, ok := n.VectorSelector.(*parser.VectorSelector); ok {
+			var err error
+			vs.LabelMatchers, err = ms.EnforceMatchers(vs.LabelMatchers)
+			if err != nil {
+				return err
+			}
+		}
+
+	case *parser.VectorSelector:
+		// Inject the label selector.
+		var err error
+		n.LabelMatchers, err = ms.EnforceMatchers(n.LabelMatchers)
+		if err != nil {
+			return err
+		}
+
+	default:
+		panic(fmt.Errorf("parser.Walk: unhandled node type %T", n))
+	}
+
+	return nil
+}
+
+// EnforceMatchers appends the configured label matcher if not present.
+// If the label matcher that is to be injected is present (by label name) but
+// different (either by match type or value), the behavior depends on the
+// errorOnReplace variable and the enforced matcher(s):
+// * if errorOnReplace is true, an error is returned;
+// * if errorOnReplace is false and the enforced matcher type is '=', the existing matcher is silently replaced;
+// * otherwise the existing matcher is preserved.
+func (ms Enforcer) EnforceMatchers(targets []*labels.Matcher) ([]*labels.Matcher, error) {
+	var res []*labels.Matcher
+
+	for _, target := range targets {
+		if matcher, ok := ms.labelMatchers[target.Name]; ok {
+			// matcher.String() returns something like "labelfoo=value"
+			if ms.errorOnReplace && matcher.String() != target.String() {
+				return res, newIllegalLabelMatcherError(matcher.String(), target.String())
+			}
+
+			// Drop the existing matcher only if the enforced matcher is an
+			// equal matcher.
+			if matcher.Type == labels.MatchEqual {
+				continue
+			}
+		}
+
+		res = append(res, target)
+	}
+
+	for _, enforcedMatcher := range ms.labelMatchers {
+		res = append(res, enforcedMatcher)
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/routes.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/routes.go
new file mode 100644
index 0000000000..266d322686
--- /dev/null
+++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/routes.go
@@ -0,0 +1,640 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package injectproxy
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/efficientgo/core/merrors"
+	"github.com/metalmatze/signal/server/signalhttp"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+)
+
+const (
+	queryParam    = "query"
+	matchersParam = "match[]"
+)
+
+type routes struct {
+	upstream *url.URL
+	handler  http.Handler
+	label    string
+	el       ExtractLabeler
+
+	mux            http.Handler
+	modifiers      map[string]func(*http.Response) error
+	errorOnReplace bool
+}
+
+type options struct {
+	enableLabelAPIs  bool
+	passthroughPaths []string
+	errorOnReplace   bool
+	registerer       prometheus.Registerer
+}
+
+type Option interface {
+	apply(*options)
+}
+
+type optionFunc func(*options)
+
+func (f optionFunc) apply(o *options) {
+	f(o)
+}
+
+// WithPrometheusRegistry configures the proxy to use the given registerer.
+func WithPrometheusRegistry(reg prometheus.Registerer) Option {
+	return optionFunc(func(o *options) {
+		o.registerer = reg
+	})
+}
+
+// WithEnabledLabelsAPI enables proxying to the labels API. If not enabled, "501 Not Implemented" will be returned for those endpoints.
+func WithEnabledLabelsAPI() Option {
+	return optionFunc(func(o *options) {
+		o.enableLabelAPIs = true
+	})
+}
+
+// WithPassthroughPaths configures routes to register the given paths as passthrough handlers for all HTTP methods;
+// requests to them are forwarded without label enforcement. Use with care.
+// NOTE: Passthrough "all" paths like "/" or "" and regexes are not allowed.
+func WithPassthroughPaths(paths []string) Option {
+	return optionFunc(func(o *options) {
+		o.passthroughPaths = paths
+	})
+}
+
+// WithErrorOnReplace causes the proxy to return 400 if a label matcher we want to
+// inject is already present in the query and matches something different.
+func WithErrorOnReplace() Option {
+	return optionFunc(func(o *options) {
+		o.errorOnReplace = true
+	})
+}
+
+// mux abstracts away the behavior we expect from the http.ServeMux type in this package.
+type mux interface {
+	http.Handler
+	Handle(string, http.Handler)
+}
+
+// strictMux is a mux that wraps a standard HTTP mux with a stricter registration
+// method that only allows safe, user-provided handler registrations.
+type strictMux struct {
+	mux
+	seen map[string]struct{}
+}
+
+func newStrictMux(m mux) *strictMux {
+	return &strictMux{
+		m,
+		map[string]struct{}{},
+	}
+}
+
+// Handle is like the HTTP mux's Handle, but it does not allow registering paths that are shared with previously registered paths.
+// It also makes sure the trailing / is registered too.
+// For example, if /api/v1/federate was registered, subsequent registrations like /api/v1/federate/ or /api/v1/federate/some will
+// return an error. Meanwhile, requests to both /api/v1/federate and /api/v1/federate/ will be routed to the handler passed in the
+// /api/v1/federate registration.
+// This reduces the risk of a user misconfiguring paths and leaking the label injection isolation.
+func (s *strictMux) Handle(pattern string, handler http.Handler) error {
+	// Strip all trailing slashes; both the bare and the trailing-slash form
+	// are registered below.
+	sanitized := strings.TrimRight(pattern, "/")
+
+	if _, ok := s.seen[sanitized]; ok {
+		return fmt.Errorf("pattern %q was already registered", sanitized)
+	}
+
+	for p := range s.seen {
+		if strings.HasPrefix(sanitized+"/", p+"/") {
+			return fmt.Errorf("pattern %q is registered, cannot register path %q that shares it", p, sanitized)
+		}
+	}
+
+	s.mux.Handle(sanitized, handler)
+	s.mux.Handle(sanitized+"/", handler)
+	s.seen[sanitized] = struct{}{}
+
+	return nil
+}
+
+// instrumentedMux wraps a mux and instruments it.
+type instrumentedMux struct {
+	mux
+	i signalhttp.HandlerInstrumenter
+}
+
+func newInstrumentedMux(m mux, r prometheus.Registerer) *instrumentedMux {
+	return &instrumentedMux{
+		m,
+		signalhttp.NewHandlerInstrumenter(r, []string{"handler"}),
+	}
+}
+
+// Handle implements the mux interface.
+func (i *instrumentedMux) Handle(pattern string, handler http.Handler) {
+	i.mux.Handle(pattern, i.i.NewHandler(prometheus.Labels{"handler": pattern}, handler))
+}
+
+// ExtractLabeler is an HTTP handler that extracts the label value to be
+// enforced from the HTTP request. If a valid label value is found, it should
+// store it in the request's context. Otherwise it should return an error in
+// the HTTP response (usually 400 or 500).
+type ExtractLabeler interface {
+	ExtractLabel(next http.HandlerFunc) http.Handler
+}
+
+// HTTPFormEnforcer enforces a label value extracted from the HTTP form and query parameters.
+type HTTPFormEnforcer struct {
+	ParameterName string
+}
+
+// ExtractLabel implements the ExtractLabeler interface.
+func (hff HTTPFormEnforcer) ExtractLabel(next http.HandlerFunc) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		labelValues, err := hff.getLabelValues(r)
+		if err != nil {
+			prometheusAPIError(w, humanFriendlyErrorMessage(err), http.StatusBadRequest)
+			return
+		}
+
+		// Remove the proxy label from the query parameters.
+		q := r.URL.Query()
+		q.Del(hff.ParameterName)
+		r.URL.RawQuery = q.Encode()
+
+		// Remove the param from the PostForm.
+		if r.Method == http.MethodPost {
+			if err := r.ParseForm(); err != nil {
+				prometheusAPIError(w, fmt.Sprintf("Failed to parse the PostForm: %v", err), http.StatusInternalServerError)
+				return
+			}
+			if r.PostForm.Get(hff.ParameterName) != "" {
+				r.PostForm.Del(hff.ParameterName)
+				newBody := r.PostForm.Encode()
+				// We are replacing request body, close previous one (ParseForm ensures it is read fully and not nil).
+				_ = r.Body.Close()
+				r.Body = io.NopCloser(strings.NewReader(newBody))
+				r.ContentLength = int64(len(newBody))
+			}
+		}
+
+		next.ServeHTTP(w, r.WithContext(WithLabelValues(r.Context(), labelValues)))
+	})
+}
+
+func (hff HTTPFormEnforcer) getLabelValues(r *http.Request) ([]string, error) {
+	err := r.ParseForm()
+	if err != nil {
+		return nil, fmt.Errorf("the form data cannot be parsed: %w", err)
+	}
+
+	formValues := removeEmptyValues(r.Form[hff.ParameterName])
+	if len(formValues) == 0 {
+		return nil, fmt.Errorf("the %q query parameter must be provided", hff.ParameterName)
+	}
+
+	return formValues, nil
+}
+
+// HTTPHeaderEnforcer enforces a label value extracted from the HTTP headers.
+type HTTPHeaderEnforcer struct {
+	Name string
+}
+
+// ExtractLabel implements the ExtractLabeler interface.
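+// For example (with a hypothetical header name): with Name set to
+// "X-Namespace", a request carrying "X-Namespace: team-a" has "team-a" stored
+// in the request context as the label value to enforce; requests without the
+// header are rejected with HTTP 400.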
+func (hhe HTTPHeaderEnforcer) ExtractLabel(next http.HandlerFunc) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		labelValues, err := hhe.getLabelValues(r)
+		if err != nil {
+			prometheusAPIError(w, humanFriendlyErrorMessage(err), http.StatusBadRequest)
+			return
+		}
+
+		next.ServeHTTP(w, r.WithContext(WithLabelValues(r.Context(), labelValues)))
+	})
+}
+
+func (hhe HTTPHeaderEnforcer) getLabelValues(r *http.Request) ([]string, error) {
+	headerValues := removeEmptyValues(r.Header[hhe.Name])
+
+	if len(headerValues) == 0 {
+		return nil, fmt.Errorf("missing HTTP header %q", hhe.Name)
+	}
+
+	return headerValues, nil
+}
+
+// StaticLabelEnforcer enforces a static label value.
+type StaticLabelEnforcer []string
+
+// ExtractLabel implements the ExtractLabeler interface.
+func (sle StaticLabelEnforcer) ExtractLabel(next http.HandlerFunc) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next(w, r.WithContext(WithLabelValues(r.Context(), sle)))
+	})
+}
+
+func NewRoutes(upstream *url.URL, label string, extractLabeler ExtractLabeler, opts ...Option) (*routes, error) {
+	opt := options{}
+	for _, o := range opts {
+		o.apply(&opt)
+	}
+
+	if opt.registerer == nil {
+		opt.registerer = prometheus.NewRegistry()
+	}
+
+	proxy := httputil.NewSingleHostReverseProxy(upstream)
+
+	r := &routes{
+		upstream:       upstream,
+		handler:        proxy,
+		label:          label,
+		el:             extractLabeler,
+		errorOnReplace: opt.errorOnReplace,
+	}
+	mux := newStrictMux(newInstrumentedMux(http.NewServeMux(), opt.registerer))
+
+	errs := merrors.New(
+		mux.Handle("/federate", r.el.ExtractLabel(enforceMethods(r.matcher, "GET"))),
+		mux.Handle("/api/v1/query", r.el.ExtractLabel(enforceMethods(r.query, "GET", "POST"))),
+		mux.Handle("/api/v1/query_range", r.el.ExtractLabel(enforceMethods(r.query, "GET", "POST"))),
+		mux.Handle("/api/v1/alerts", r.el.ExtractLabel(enforceMethods(r.passthrough, "GET"))),
+		mux.Handle("/api/v1/rules", r.el.ExtractLabel(enforceMethods(r.passthrough, "GET"))),
+		mux.Handle("/api/v1/series", r.el.ExtractLabel(enforceMethods(r.matcher, "GET", "POST"))),
+		mux.Handle("/api/v1/query_exemplars", r.el.ExtractLabel(enforceMethods(r.query, "GET", "POST"))),
+	)
+
+	if opt.enableLabelAPIs {
+		errs.Add(
+			mux.Handle("/api/v1/labels", r.el.ExtractLabel(enforceMethods(r.matcher, "GET", "POST"))),
+			// Full path is /api/v1/label/<name>/values but http mux does not support patterns.
+			// This is fine though as we don't care about the name for the matcher injector.
+			mux.Handle("/api/v1/label/", r.el.ExtractLabel(enforceMethods(r.matcher, "GET"))),
+		)
+	}
+
+	errs.Add(
+		// Reject multi label values with assertSingleLabelValue() because the
+		// semantics of the Silences API don't support multi-label matchers.
+		mux.Handle("/api/v2/silences", r.el.ExtractLabel(
+			enforceMethods(
+				assertSingleLabelValue(r.silences),
+				"GET", "POST",
+			),
+		)),
+		mux.Handle("/api/v2/silence/", r.el.ExtractLabel(
+			enforceMethods(
+				assertSingleLabelValue(r.deleteSilence),
+				"DELETE",
+			),
+		)),
+
+		mux.Handle("/api/v2/alerts/groups", r.el.ExtractLabel(enforceMethods(r.enforceFilterParameter, "GET"))),
+		mux.Handle("/api/v2/alerts", r.el.ExtractLabel(enforceMethods(r.alerts, "GET"))),
+	)
+
+	errs.Add(
+		mux.Handle("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			_ = json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+		})),
+	)
+
+	if err := errs.Err(); err != nil {
+		return nil, err
+	}
+
+	// Validate paths.
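+	// Each passthrough path must parse back to itself as a plain URI path;
+	// the empty path and the catch-all "/" are rejected below.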
+	for _, path := range opt.passthroughPaths {
+		u, err := url.Parse(fmt.Sprintf("http://example.com%v", path))
+		if err != nil {
+			return nil, fmt.Errorf("path %q is not a valid URI path, got %v", path, opt.passthroughPaths)
+		}
+		if u.Path != path {
+			return nil, fmt.Errorf("path %q is not a valid URI path, got %v", path, opt.passthroughPaths)
+		}
+		if u.Path == "" || u.Path == "/" {
+			return nil, fmt.Errorf("path %q is not allowed, got %v", u.Path, opt.passthroughPaths)
+		}
+	}
+
+	// Register optional passthrough paths.
+	for _, path := range opt.passthroughPaths {
+		if err := mux.Handle(path, http.HandlerFunc(r.passthrough)); err != nil {
+			return nil, err
+		}
+	}
+
+	r.mux = mux
+	r.modifiers = map[string]func(*http.Response) error{
+		"/api/v1/rules":  modifyAPIResponse(r.filterRules),
+		"/api/v1/alerts": modifyAPIResponse(r.filterAlerts),
+	}
+	proxy.ModifyResponse = r.ModifyResponse
+	return r, nil
+}
+
+func (r *routes) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	r.mux.ServeHTTP(w, req)
+}
+
+func (r *routes) ModifyResponse(resp *http.Response) error {
+	m, found := r.modifiers[resp.Request.URL.Path]
+	if !found {
+		// Return the server's response unmodified.
+		return nil
+	}
+	return m(resp)
+}
+
+func enforceMethods(h http.HandlerFunc, methods ...string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		for _, m := range methods {
+			if m == req.Method {
+				h(w, req)
+				return
+			}
+		}
+		http.NotFound(w, req)
+	}
+}
+
+type ctxKey int
+
+const keyLabel ctxKey = iota
+
+// MustLabelValues returns labels (previously stored using WithLabelValues())
+// from the given context.
+// It will panic if no label is found or the value is empty.
+func MustLabelValues(ctx context.Context) []string {
+	labels, ok := ctx.Value(keyLabel).([]string)
+	if !ok {
+		panic(fmt.Sprintf("can't find the %q value in the context", keyLabel))
+	}
+	if len(labels) == 0 {
+		panic(fmt.Sprintf("empty %q value in the context", keyLabel))
+	}
+
+	sort.Strings(labels)
+
+	return labels
+}
+
+// MustLabelValue returns the first (in alphabetical order) label value
+// previously stored using WithLabelValues() from the given context.
+// Similar to MustLabelValues, it will panic if no label is found or the value
+// is empty.
+func MustLabelValue(ctx context.Context) string {
+	v := MustLabelValues(ctx)
+	return v[0]
+}
+
+func labelValuesToRegexpString(labelValues []string) string {
+	lvs := make([]string, len(labelValues))
+	for i := range labelValues {
+		lvs[i] = regexp.QuoteMeta(labelValues[i])
+	}
+
+	return strings.Join(lvs, "|")
+}
+
+// WithLabelValues stores labels in the given context.
+func WithLabelValues(ctx context.Context, labels []string) context.Context {
+	return context.WithValue(ctx, keyLabel, labels)
+}
+
+func (r *routes) passthrough(w http.ResponseWriter, req *http.Request) {
+	r.handler.ServeHTTP(w, req)
+}
+
+func (r *routes) query(w http.ResponseWriter, req *http.Request) {
+	var matcher *labels.Matcher
+
+	if len(MustLabelValues(req.Context())) > 1 {
+		matcher = &labels.Matcher{
+			Name:  r.label,
+			Type:  labels.MatchRegexp,
+			Value: labelValuesToRegexpString(MustLabelValues(req.Context())),
+		}
+	} else {
+		matcher = &labels.Matcher{
+			Name:  r.label,
+			Type:  labels.MatchEqual,
+			Value: MustLabelValue(req.Context()),
+		}
+	}
+
+	e := NewEnforcer(r.errorOnReplace, matcher)
+
+	// The `query` can come in the URL query string and/or the POST body.
+	// For this reason, we need to try enforcing it in both places.
+	// Note: a POST request may include some values in the URL query string
+	// and others in the body. If both locations include a `query`, then
+	// enforce in both places.
+	q, found1, err := enforceQueryValues(e, req.URL.Query())
+	if err != nil {
+		switch err.(type) {
+		case IllegalLabelMatcherError:
+			prometheusAPIError(w, err.Error(), http.StatusBadRequest)
+		case queryParseError:
+			prometheusAPIError(w, err.Error(), http.StatusBadRequest)
+		case enforceLabelError:
+			prometheusAPIError(w, err.Error(), http.StatusInternalServerError)
+		}
+		return
+	}
+	req.URL.RawQuery = q
+
+	var found2 bool
+	// Enforce the query in the POST body if needed.
+	if req.Method == http.MethodPost {
+		if err := req.ParseForm(); err != nil {
+			prometheusAPIError(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		q, found2, err = enforceQueryValues(e, req.PostForm)
+		if err != nil {
+			switch err.(type) {
+			case IllegalLabelMatcherError:
+				prometheusAPIError(w, err.Error(), http.StatusBadRequest)
+			case queryParseError:
+				prometheusAPIError(w, err.Error(), http.StatusBadRequest)
+			case enforceLabelError:
+				prometheusAPIError(w, err.Error(), http.StatusInternalServerError)
+			}
+			return
+		}
+		// We are replacing request body, close previous one (ParseForm ensures it is read fully and not nil).
+		_ = req.Body.Close()
+		req.Body = io.NopCloser(strings.NewReader(q))
+		req.ContentLength = int64(len(q))
+	}
+
+	// If no query was found, return early.
+	if !found1 && !found2 {
+		return
+	}
+
+	r.handler.ServeHTTP(w, req)
+}
+
+func enforceQueryValues(e *Enforcer, v url.Values) (values string, foundQuery bool, err error) {
+	// If no query is present in the given values (e.g. the query came in the
+	// POST body while these are the URL query string values), return early.
+	if v.Get(queryParam) == "" {
+		return v.Encode(), false, nil
+	}
+	expr, err := parser.ParseExpr(v.Get(queryParam))
+	if err != nil {
+		queryParseError := newQueryParseError(err)
+		return "", true, queryParseError
+	}
+
+	if err := e.EnforceNode(expr); err != nil {
+		if _, ok := err.(IllegalLabelMatcherError); ok {
+			return "", true, err
+		}
+		enforceLabelError := newEnforceLabelError(err)
+		return "", true, enforceLabelError
+	}
+
+	v.Set(queryParam, expr.String())
+	return v.Encode(), true, nil
+}
+
+// matcher ensures that all the provided match[] parameters, if any, have the label injected. If none was provided, a single matcher is injected.
+// This works for non-query Prometheus APIs like /api/v1/series, /api/v1/label/<name>/values, /api/v1/labels and /federate, which support multiple matchers.
+// See e.g. https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metadata
+func (r *routes) matcher(w http.ResponseWriter, req *http.Request) {
+	matcher := &labels.Matcher{
+		Name:  r.label,
+		Type:  labels.MatchRegexp,
+		Value: labelValuesToRegexpString(MustLabelValues(req.Context())),
+	}
+
+	q := req.URL.Query()
+	if err := injectMatcher(q, matcher); err != nil {
+		return
+	}
+
+	req.URL.RawQuery = q.Encode()
+	if req.Method == http.MethodPost {
+		if err := req.ParseForm(); err != nil {
+			return
+		}
+
+		q = req.PostForm
+		if err := injectMatcher(q, matcher); err != nil {
+			return
+		}
+
+		// We are replacing request body, close previous one (ParseForm ensures it is read fully and not nil).
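+		// req.ContentLength is also updated below to match the re-encoded
+		// body; a stale length would truncate or over-read the proxied request.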
+ _ = req.Body.Close() + newBody := q.Encode() + req.Body = io.NopCloser(strings.NewReader(newBody)) + req.ContentLength = int64(len(newBody)) + } + + r.handler.ServeHTTP(w, req) +} + +func injectMatcher(q url.Values, matcher *labels.Matcher) error { + matchers := q[matchersParam] + if len(matchers) == 0 { + q.Set(matchersParam, matchersToString(matcher)) + return nil + } + + // Inject label into existing matchers. + for i, m := range matchers { + ms, err := parser.ParseMetricSelector(m) + if err != nil { + return err + } + + matchers[i] = matchersToString(append(ms, matcher)...) + } + q[matchersParam] = matchers + + return nil +} + +func matchersToString(ms ...*labels.Matcher) string { + var el []string + for _, m := range ms { + el = append(el, m.String()) + } + return fmt.Sprintf("{%v}", strings.Join(el, ",")) +} + +type queryParseError struct { + msg string +} + +func (e queryParseError) Error() string { + return e.msg +} + +func newQueryParseError(err error) queryParseError { + return queryParseError{msg: fmt.Sprintf("error parsing query string %q", err.Error())} +} + +type enforceLabelError struct { + msg string +} + +func (e enforceLabelError) Error() string { + return e.msg +} + +func newEnforceLabelError(err error) enforceLabelError { + return enforceLabelError{msg: fmt.Sprintf("error enforcing label %q", err.Error())} +} + +// humanFriendlyErrorMessage returns an error message with a capitalized first letter +// and a punctuation at the end. +func humanFriendlyErrorMessage(err error) string { + if err == nil { + return "" + } + errMsg := err.Error() + return fmt.Sprintf("%s%s.", strings.ToUpper(errMsg[:1]), errMsg[1:]) +} + +func removeEmptyValues(slice []string) []string { + for i := 0; i < len(slice); i++ { + if slice[i] == "" { + slice = append(slice[:i], slice[i+1:]...) + i-- + } + } + + return slice +} diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/rules.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/rules.go new file mode 100644 index 0000000000..30ac514874 --- /dev/null +++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/rules.go @@ -0,0 +1,243 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package injectproxy + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/prometheus/prometheus/model/labels" + "golang.org/x/exp/slices" +) + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data,omitempty"` + ErrorType string `json:"errorType,omitempty"` + Error string `json:"error,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +func getAPIResponse(resp *http.Response) (*apiResponse, error) { + defer resp.Body.Close() + reader := resp.Body + + if resp.Header.Get("Content-Encoding") == "gzip" && !resp.Uncompressed { + var err error + reader, err = gzip.NewReader(resp.Body) + if err != nil { + return nil, fmt.Errorf("gzip decoding error: %w", err) + } + defer reader.Close() + + // TODO: recompress the modified response? + resp.Header.Del("Content-Encoding") + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + var apir apiResponse + if err := json.NewDecoder(reader).Decode(&apir); err != nil { + return nil, fmt.Errorf("JSON decoding error: %w", err) + } + + if apir.Status != "success" { + return nil, fmt.Errorf("unexpected response status: %q", apir.Status) + } + + return &apir, nil +} + +type rulesData struct { + RuleGroups []*ruleGroup `json:"groups"` +} + +type ruleGroup struct { + Name string `json:"name"` + File string `json:"file"` + Rules []rule `json:"rules"` + Interval float64 `json:"interval"` +} + +type rule struct { + *alertingRule + *recordingRule +} + +func (r *rule) Labels() labels.Labels { + if r.alertingRule != nil { + return r.alertingRule.Labels + } + return r.recordingRule.Labels +} + +// MarshalJSON implements the json.Marshaler interface for rule. +func (r *rule) MarshalJSON() ([]byte, error) { + if r.alertingRule != nil { + return json.Marshal(r.alertingRule) + } + return json.Marshal(r.recordingRule) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for rule. +func (r *rule) UnmarshalJSON(b []byte) error { + var ruleType struct { + Type string `json:"type"` + } + if err := json.Unmarshal(b, &ruleType); err != nil { + return err + } + switch ruleType.Type { + case "alerting": + var alertingr alertingRule + if err := json.Unmarshal(b, &alertingr); err != nil { + return err + } + r.alertingRule = &alertingr + case "recording": + var recordingr recordingRule + if err := json.Unmarshal(b, &recordingr); err != nil { + return err + } + r.recordingRule = &recordingr + default: + return fmt.Errorf("failed to unmarshal rule: unknown type %q", ruleType.Type) + } + + return nil +} + +type alertingRule struct { + Name string `json:"name"` + Query string `json:"query"` + Duration float64 `json:"duration"` + Labels labels.Labels `json:"labels"` + Annotations labels.Labels `json:"annotations"` + Alerts []*alert `json:"alerts"` + Health string `json:"health"` + LastError string `json:"lastError,omitempty"` + // Type of an alertingRule is always "alerting". + Type string `json:"type"` +} + +type recordingRule struct { + Name string `json:"name"` + Query string `json:"query"` + Labels labels.Labels `json:"labels,omitempty"` + Health string `json:"health"` + LastError string `json:"lastError,omitempty"` + // Type of a recordingRule is always "recording". 
+ Type string `json:"type"` +} + +type alertsData struct { + Alerts []*alert `json:"alerts"` +} + +type alert struct { + Labels labels.Labels `json:"labels"` + Annotations labels.Labels `json:"annotations"` + State string `json:"state"` + ActiveAt *time.Time `json:"activeAt,omitempty"` + Value string `json:"value"` +} + +// modifyAPIResponse unwraps the Prometheus API response, passes the enforced +// label value and the response to the given function and finally replaces the +// result in the response. +func modifyAPIResponse(f func([]string, *apiResponse) (interface{}, error)) func(*http.Response) error { + return func(resp *http.Response) error { + if resp.StatusCode != http.StatusOK { + // Pass non-200 responses as-is. + return nil + } + + apir, err := getAPIResponse(resp) + if err != nil { + return fmt.Errorf("can't decode API response: %w", err) + } + + v, err := f(MustLabelValues(resp.Request.Context()), apir) + if err != nil { + return err + } + + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("can't replace data: %w", err) + } + apir.Data = json.RawMessage(b) + + var buf bytes.Buffer + if err = json.NewEncoder(&buf).Encode(apir); err != nil { + return fmt.Errorf("can't encode API response: %w", err) + } + resp.Body = io.NopCloser(&buf) + resp.Header["Content-Length"] = []string{fmt.Sprint(buf.Len())} + + return nil + } +} + +func (r *routes) filterRules(lvalues []string, resp *apiResponse) (interface{}, error) { + var rgs rulesData + if err := json.Unmarshal(resp.Data, &rgs); err != nil { + return nil, fmt.Errorf("can't decode rules data: %w", err) + } + + filtered := []*ruleGroup{} + for _, rg := range rgs.RuleGroups { + var rules []rule + for _, rule := range rg.Rules { + for _, lbl := range rule.Labels() { + if lbl.Name == r.label && slices.Contains(lvalues, lbl.Value) { + rules = append(rules, rule) + break + } + } + } + if len(rules) > 0 { + rg.Rules = rules + filtered = append(filtered, rg) + } + } + + return &rulesData{RuleGroups: filtered}, nil +} + +func (r *routes) filterAlerts(lvalues []string, resp *apiResponse) (interface{}, error) { + var data alertsData + if err := json.Unmarshal(resp.Data, &data); err != nil { + return nil, fmt.Errorf("can't decode alerts data: %w", err) + } + + filtered := []*alert{} + for _, alert := range data.Alerts { + for _, lbl := range alert.Labels { + if lbl.Name == r.label && slices.Contains(lvalues, lbl.Value) { + filtered = append(filtered, alert) + break + } + } + } + + return &alertsData{Alerts: filtered}, nil +} diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/silences.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/silences.go new file mode 100644 index 0000000000..7a09137d30 --- /dev/null +++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/silences.go @@ -0,0 +1,210 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package injectproxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "path" + "strconv" + "strings" + + runtimeclient "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/prometheus/alertmanager/api/v2/client" + "github.com/prometheus/alertmanager/api/v2/client/silence" + "github.com/prometheus/alertmanager/api/v2/models" + "github.com/prometheus/alertmanager/pkg/labels" +) + +// silences proxies HTTP requests to the Alertmanager /api/v2/silences endpoint. +func (r *routes) silences(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case "GET": + r.enforceFilterParameter(w, req) + case "POST": + r.postSilence(w, req) + default: + http.NotFound(w, req) + } +} + +// assertSingleLabelValue verifies that the proxy is configured to match only +// one label value. If not, it will reply with "422 Unprocessable Content". +func assertSingleLabelValue(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + labelValues := MustLabelValues(req.Context()) + if len(labelValues) > 1 { + http.Error(w, "Multiple label matchers not supported", http.StatusUnprocessableEntity) + return + } + + next(w, req) + } +} + +// enforceFilterParameter injects a label matcher parameter into the +// Alertmanager API's query. +func (r *routes) enforceFilterParameter(w http.ResponseWriter, req *http.Request) { + var ( + q = req.URL.Query() + proxyLabelMatch labels.Matcher + ) + + if len(MustLabelValues(req.Context())) > 1 { + proxyLabelMatch = labels.Matcher{ + Type: labels.MatchRegexp, + Name: r.label, + Value: labelValuesToRegexpString(MustLabelValues(req.Context())), + } + } else { + proxyLabelMatch = labels.Matcher{ + Type: labels.MatchEqual, + Name: r.label, + Value: MustLabelValue(req.Context()), + } + } + + modified := []string{proxyLabelMatch.String()} + for _, filter := range q["filter"] { + m, err := labels.ParseMatcher(filter) + if err != nil { + prometheusAPIError(w, fmt.Sprintf("bad request: can't parse filter %q: %v", filter, err), http.StatusBadRequest) + return + } + + // Keep the original matcher in case of multi label values because + // the user might want to filter on a specific value. + if m.Name == r.label && proxyLabelMatch.Type != labels.MatchRegexp { + continue + } + + modified = append(modified, filter) + } + + q["filter"] = modified + q.Del(r.label) + req.URL.RawQuery = q.Encode() + + r.handler.ServeHTTP(w, req) +} + +func (r *routes) postSilence(w http.ResponseWriter, req *http.Request) { + var ( + sil models.PostableSilence + lvalue = MustLabelValue(req.Context()) + ) + + if err := json.NewDecoder(req.Body).Decode(&sil); err != nil { + prometheusAPIError(w, fmt.Sprintf("bad request: can't decode: %v", err), http.StatusBadRequest) + return + } + + if sil.ID != "" { + // This is an update for an existing silence. 
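+		// Fetch the existing silence and verify that it already carries the
+		// enforced label matcher; otherwise a client could take over another
+		// label value's (e.g. another tenant's) silence by reusing its ID.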
+ existing, err := r.getSilenceByID(req.Context(), sil.ID) + if err != nil { + prometheusAPIError(w, fmt.Sprintf("proxy error: can't get silence: %v", err), http.StatusBadGateway) + return + } + + if !hasMatcherForLabel(existing.Matchers, r.label, lvalue) { + prometheusAPIError(w, "forbidden", http.StatusForbidden) + return + } + } + + var falsy bool + modified := models.Matchers{ + &models.Matcher{Name: &(r.label), Value: &lvalue, IsRegex: &falsy}, + } + for _, m := range sil.Matchers { + if m.Name != nil && *m.Name == r.label { + continue + } + modified = append(modified, m) + } + // At least one matcher in addition to the enforced label is required, + // otherwise all alerts would be silenced + if len(modified) < 2 { + prometheusAPIError(w, "need at least one matcher, got none", http.StatusBadRequest) + return + } + sil.Matchers = modified + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(&sil); err != nil { + prometheusAPIError(w, fmt.Sprintf("can't encode: %v", err), http.StatusInternalServerError) + return + } + + req = req.Clone(req.Context()) + req.Body = io.NopCloser(&buf) + req.URL.RawQuery = "" + req.Header["Content-Length"] = []string{strconv.Itoa(buf.Len())} + req.ContentLength = int64(buf.Len()) + + r.handler.ServeHTTP(w, req) +} + +// deleteSilence proxies HTTP requests to the Alertmanager /api/v2/silence/ endpoint. +func (r *routes) deleteSilence(w http.ResponseWriter, req *http.Request) { + silID := strings.TrimPrefix(req.URL.Path, "/api/v2/silence/") + if silID == "" || silID == req.URL.Path { + prometheusAPIError(w, "bad request", http.StatusBadRequest) + return + } + + // Get the silence by ID and verify that it has the expected label. + sil, err := r.getSilenceByID(req.Context(), silID) + if err != nil { + prometheusAPIError(w, fmt.Sprintf("proxy error: %v", err), http.StatusBadGateway) + return + } + + if !hasMatcherForLabel(sil.Matchers, r.label, MustLabelValue(req.Context())) { + prometheusAPIError(w, "forbidden", http.StatusForbidden) + return + } + + req.URL.RawQuery = "" + r.handler.ServeHTTP(w, req) +} + +func (r *routes) getSilenceByID(ctx context.Context, id string) (*models.GettableSilence, error) { + amc := client.New( + runtimeclient.New(r.upstream.Host, path.Join(r.upstream.Path, "/api/v2"), []string{r.upstream.Scheme}), + strfmt.Default, + ) + params := silence.NewGetSilenceParams().WithContext(ctx) + params.SetSilenceID(strfmt.UUID(id)) + sil, err := amc.Silence.GetSilence(params) + if err != nil { + return nil, err + } + return sil.Payload, nil +} + +func hasMatcherForLabel(matchers models.Matchers, name, value string) bool { + for _, m := range matchers { + if *m.Name == name && !*m.IsRegex && *m.Value == value { + return true + } + } + return false +} diff --git a/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/utils.go b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/utils.go new file mode 100644 index 0000000000..aacc9abef9 --- /dev/null +++ b/vendor/github.com/prometheus-community/prom-label-proxy/injectproxy/utils.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package injectproxy + +import ( + "encoding/json" + "log" + "net/http" +) + +func prometheusAPIError(w http.ResponseWriter, errorMessage string, code int) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(code) + + res := map[string]string{"status": "error", "errorType": "prom-label-proxy", "error": errorMessage} + + if err := json.NewEncoder(w).Encode(res); err != nil { + log.Printf("error: Failed to encode json: %v", err) + } +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/alert_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/alert_client.go new file mode 100644 index 0000000000..e30e3de9c5 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/alert_client.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alert + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new alert API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for alert API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetAlerts(params *GetAlertsParams, opts ...ClientOption) (*GetAlertsOK, error) + + PostAlerts(params *PostAlertsParams, opts ...ClientOption) (*PostAlertsOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetAlerts Get a list of alerts +*/ +func (a *Client) GetAlerts(params *GetAlertsParams, opts ...ClientOption) (*GetAlertsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetAlertsParams() + } + op := &runtime.ClientOperation{ + ID: "getAlerts", + Method: "GET", + PathPattern: "/alerts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetAlertsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetAlertsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getAlerts: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +PostAlerts Create new Alerts +*/ +func (a *Client) PostAlerts(params *PostAlertsParams, opts ...ClientOption) (*PostAlertsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPostAlertsParams() + } + op := &runtime.ClientOperation{ + ID: "postAlerts", + Method: "POST", + PathPattern: "/alerts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PostAlertsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*PostAlertsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for postAlerts: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_parameters.go new file mode 100644 index 0000000000..89ff1880ee --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_parameters.go @@ -0,0 +1,387 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alert + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetAlertsParams creates a new GetAlertsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetAlertsParams() *GetAlertsParams { + return &GetAlertsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetAlertsParamsWithTimeout creates a new GetAlertsParams object +// with the ability to set a timeout on a request. +func NewGetAlertsParamsWithTimeout(timeout time.Duration) *GetAlertsParams { + return &GetAlertsParams{ + timeout: timeout, + } +} + +// NewGetAlertsParamsWithContext creates a new GetAlertsParams object +// with the ability to set a context for a request. +func NewGetAlertsParamsWithContext(ctx context.Context) *GetAlertsParams { + return &GetAlertsParams{ + Context: ctx, + } +} + +// NewGetAlertsParamsWithHTTPClient creates a new GetAlertsParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetAlertsParamsWithHTTPClient(client *http.Client) *GetAlertsParams { + return &GetAlertsParams{ + HTTPClient: client, + } +} + +/* +GetAlertsParams contains all the parameters to send to the API endpoint + + for the get alerts operation. + + Typically these are written to a http.Request. +*/ +type GetAlertsParams struct { + + /* Active. + + Show active alerts + + Default: true + */ + Active *bool + + /* Filter. + + A list of matchers to filter alerts by + */ + Filter []string + + /* Inhibited. + + Show inhibited alerts + + Default: true + */ + Inhibited *bool + + /* Receiver. + + A regex matching receivers to filter alerts by + */ + Receiver *string + + /* Silenced. + + Show silenced alerts + + Default: true + */ + Silenced *bool + + /* Unprocessed. + + Show unprocessed alerts + + Default: true + */ + Unprocessed *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get alerts params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetAlertsParams) WithDefaults() *GetAlertsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get alerts params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetAlertsParams) SetDefaults() { + var ( + activeDefault = bool(true) + + inhibitedDefault = bool(true) + + silencedDefault = bool(true) + + unprocessedDefault = bool(true) + ) + + val := GetAlertsParams{ + Active: &activeDefault, + Inhibited: &inhibitedDefault, + Silenced: &silencedDefault, + Unprocessed: &unprocessedDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get alerts params +func (o *GetAlertsParams) WithTimeout(timeout time.Duration) *GetAlertsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get alerts params +func (o *GetAlertsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get alerts params +func (o *GetAlertsParams) WithContext(ctx context.Context) *GetAlertsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get alerts params +func (o *GetAlertsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get alerts params +func (o *GetAlertsParams) WithHTTPClient(client *http.Client) *GetAlertsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get alerts params +func (o *GetAlertsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithActive adds the active to the get alerts params +func (o *GetAlertsParams) WithActive(active *bool) *GetAlertsParams { + o.SetActive(active) + return o +} + +// SetActive adds the active to the get alerts params +func (o *GetAlertsParams) SetActive(active *bool) { + o.Active = active +} + +// WithFilter adds the filter to the get alerts params +func (o *GetAlertsParams) WithFilter(filter []string) *GetAlertsParams { + o.SetFilter(filter) + return o +} + +// SetFilter adds the filter to the get alerts params +func (o *GetAlertsParams) SetFilter(filter []string) { + o.Filter = filter +} + +// WithInhibited adds the inhibited to the get alerts params +func (o *GetAlertsParams) WithInhibited(inhibited *bool) *GetAlertsParams { + o.SetInhibited(inhibited) + return o +} + +// SetInhibited adds the inhibited to the get alerts params +func (o *GetAlertsParams) SetInhibited(inhibited *bool) { + o.Inhibited = inhibited +} + +// WithReceiver adds the receiver to the get alerts params +func (o *GetAlertsParams) WithReceiver(receiver *string) *GetAlertsParams { + o.SetReceiver(receiver) + return o +} + +// SetReceiver adds the receiver to the get alerts params +func (o *GetAlertsParams) SetReceiver(receiver *string) { + o.Receiver = receiver +} + +// WithSilenced adds the silenced to the get alerts params +func (o *GetAlertsParams) WithSilenced(silenced *bool) *GetAlertsParams { + o.SetSilenced(silenced) + return o +} + +// SetSilenced adds the silenced to the get alerts params +func (o *GetAlertsParams) SetSilenced(silenced *bool) { + o.Silenced = silenced +} + +// WithUnprocessed adds the unprocessed to the get alerts params +func (o *GetAlertsParams) WithUnprocessed(unprocessed *bool) *GetAlertsParams { + o.SetUnprocessed(unprocessed) + return o +} + +// SetUnprocessed adds the unprocessed to the get alerts params +func (o *GetAlertsParams) SetUnprocessed(unprocessed *bool) { + o.Unprocessed = unprocessed +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAlertsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Active != nil { + + // query param active + var qrActive bool + + if o.Active != nil { + qrActive = *o.Active + } + qActive := swag.FormatBool(qrActive) + if qActive != "" { + + if err := r.SetQueryParam("active", qActive); err != nil { + return err + } + } + } + + if o.Filter != nil { + + // binding items for filter + joinedFilter := o.bindParamFilter(reg) + + // query array param filter + if err := r.SetQueryParam("filter", joinedFilter...); err != nil { + return err + } + } + + if o.Inhibited != nil { + + // query param inhibited + var qrInhibited bool + + if o.Inhibited != nil { + qrInhibited = *o.Inhibited + } + qInhibited := swag.FormatBool(qrInhibited) + if qInhibited != "" { + + if err := r.SetQueryParam("inhibited", qInhibited); err != nil { + return err + } + } + } + + if o.Receiver != nil { + + // query param receiver + var qrReceiver string + + if o.Receiver != nil { + qrReceiver = *o.Receiver + } + qReceiver := qrReceiver + if qReceiver != "" { + + if err := r.SetQueryParam("receiver", qReceiver); err != nil { + return err + } + } + } + + if o.Silenced != nil { + + // query param silenced + var qrSilenced bool + + if o.Silenced != nil { + qrSilenced = *o.Silenced + } + qSilenced := swag.FormatBool(qrSilenced) + if qSilenced != "" { + + if err := r.SetQueryParam("silenced", qSilenced); err != nil { + return err + } + } + } + + if o.Unprocessed != nil { + + // query param unprocessed + var qrUnprocessed bool + + if o.Unprocessed != nil { + qrUnprocessed = *o.Unprocessed + } + qUnprocessed := swag.FormatBool(qrUnprocessed) + if qUnprocessed != "" { + + if err := r.SetQueryParam("unprocessed", qUnprocessed); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindParamGetAlerts binds the parameter filter +func (o *GetAlertsParams) bindParamFilter(formats strfmt.Registry) []string { + filterIR := o.Filter + + var filterIC []string + for _, filterIIR := range filterIR { // explode []string + + filterIIV := filterIIR // string as string + filterIC = append(filterIC, filterIIV) + } + + // items.CollectionFormat: "multi" + filterIS := swag.JoinByFormat(filterIC, "multi") + + return filterIS +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_responses.go new file mode 100644 index 0000000000..516dbc31d9 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/get_alerts_responses.go @@ -0,0 +1,244 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alert + +// This file was generated by the swagger tool. 
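+//
+// A minimal sketch of building GetAlertsParams fluently (values are
+// illustrative; swag.Bool is the go-openapi pointer helper):
+//
+//	params := NewGetAlertsParams().
+//		WithActive(swag.Bool(true)).
+//		WithSilenced(swag.Bool(false)).
+//		WithFilter([]string{`job="node"`})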
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// GetAlertsReader is a Reader for the GetAlerts structure. +type GetAlertsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetAlertsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetAlertsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetAlertsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetAlertsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetAlertsOK creates a GetAlertsOK with default headers values +func NewGetAlertsOK() *GetAlertsOK { + return &GetAlertsOK{} +} + +/* +GetAlertsOK describes a response with status code 200, with default header values. + +Get alerts response +*/ +type GetAlertsOK struct { + Payload models.GettableAlerts +} + +// IsSuccess returns true when this get alerts o k response has a 2xx status code +func (o *GetAlertsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get alerts o k response has a 3xx status code +func (o *GetAlertsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alerts o k response has a 4xx status code +func (o *GetAlertsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get alerts o k response has a 5xx status code +func (o *GetAlertsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get alerts o k response a status code equal to that given +func (o *GetAlertsOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetAlertsOK) Error() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsOK %+v", 200, o.Payload) +} + +func (o *GetAlertsOK) String() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsOK %+v", 200, o.Payload) +} + +func (o *GetAlertsOK) GetPayload() models.GettableAlerts { + return o.Payload +} + +func (o *GetAlertsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAlertsBadRequest creates a GetAlertsBadRequest with default headers values +func NewGetAlertsBadRequest() *GetAlertsBadRequest { + return &GetAlertsBadRequest{} +} + +/* +GetAlertsBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type GetAlertsBadRequest struct { + Payload string +} + +// IsSuccess returns true when this get alerts bad request response has a 2xx status code +func (o *GetAlertsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get alerts bad request response has a 3xx status code +func (o *GetAlertsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alerts bad request response has a 4xx status code +func (o *GetAlertsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get alerts bad request response has a 5xx status code +func (o *GetAlertsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get alerts bad request response a status code equal to that given +func (o *GetAlertsBadRequest) IsCode(code int) bool { + return code == 400 +} + +func (o *GetAlertsBadRequest) Error() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsBadRequest %+v", 400, o.Payload) +} + +func (o *GetAlertsBadRequest) String() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsBadRequest %+v", 400, o.Payload) +} + +func (o *GetAlertsBadRequest) GetPayload() string { + return o.Payload +} + +func (o *GetAlertsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAlertsInternalServerError creates a GetAlertsInternalServerError with default headers values +func NewGetAlertsInternalServerError() *GetAlertsInternalServerError { + return &GetAlertsInternalServerError{} +} + +/* +GetAlertsInternalServerError describes a response with status code 500, with default header values. 
+ +Internal server error +*/ +type GetAlertsInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this get alerts internal server error response has a 2xx status code +func (o *GetAlertsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get alerts internal server error response has a 3xx status code +func (o *GetAlertsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alerts internal server error response has a 4xx status code +func (o *GetAlertsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get alerts internal server error response has a 5xx status code +func (o *GetAlertsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get alerts internal server error response a status code equal to that given +func (o *GetAlertsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetAlertsInternalServerError) Error() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetAlertsInternalServerError) String() string { + return fmt.Sprintf("[GET /alerts][%d] getAlertsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetAlertsInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *GetAlertsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_parameters.go new file mode 100644 index 0000000000..ddeeda4008 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_parameters.go @@ -0,0 +1,167 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alert + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// NewPostAlertsParams creates a new PostAlertsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
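+//
+// A minimal sketch of firing one alert (client wiring as in alert_client.go;
+// the label value is an illustrative assumption):
+//
+//	alertAPI := New(transport, strfmt.Default)
+//	a := &models.PostableAlert{Alert: models.Alert{Labels: models.LabelSet{"alertname": "Demo"}}}
+//	params := NewPostAlertsParams().WithAlerts(models.PostableAlerts{a})
+//	if _, err := alertAPI.PostAlerts(params); err != nil {
+//		log.Fatal(err)
+//	}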
+func NewPostAlertsParams() *PostAlertsParams { + return &PostAlertsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewPostAlertsParamsWithTimeout creates a new PostAlertsParams object +// with the ability to set a timeout on a request. +func NewPostAlertsParamsWithTimeout(timeout time.Duration) *PostAlertsParams { + return &PostAlertsParams{ + timeout: timeout, + } +} + +// NewPostAlertsParamsWithContext creates a new PostAlertsParams object +// with the ability to set a context for a request. +func NewPostAlertsParamsWithContext(ctx context.Context) *PostAlertsParams { + return &PostAlertsParams{ + Context: ctx, + } +} + +// NewPostAlertsParamsWithHTTPClient creates a new PostAlertsParams object +// with the ability to set a custom HTTPClient for a request. +func NewPostAlertsParamsWithHTTPClient(client *http.Client) *PostAlertsParams { + return &PostAlertsParams{ + HTTPClient: client, + } +} + +/* +PostAlertsParams contains all the parameters to send to the API endpoint + + for the post alerts operation. + + Typically these are written to a http.Request. +*/ +type PostAlertsParams struct { + + /* Alerts. + + The alerts to create + */ + Alerts models.PostableAlerts + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the post alerts params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *PostAlertsParams) WithDefaults() *PostAlertsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the post alerts params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *PostAlertsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the post alerts params +func (o *PostAlertsParams) WithTimeout(timeout time.Duration) *PostAlertsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the post alerts params +func (o *PostAlertsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the post alerts params +func (o *PostAlertsParams) WithContext(ctx context.Context) *PostAlertsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the post alerts params +func (o *PostAlertsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the post alerts params +func (o *PostAlertsParams) WithHTTPClient(client *http.Client) *PostAlertsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the post alerts params +func (o *PostAlertsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAlerts adds the alerts to the post alerts params +func (o *PostAlertsParams) WithAlerts(alerts models.PostableAlerts) *PostAlertsParams { + o.SetAlerts(alerts) + return o +} + +// SetAlerts adds the alerts to the post alerts params +func (o *PostAlertsParams) SetAlerts(alerts models.PostableAlerts) { + o.Alerts = alerts +} + +// WriteToRequest writes these params to a swagger request +func (o *PostAlertsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Alerts != nil { + if err := r.SetBodyParam(o.Alerts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_responses.go new file mode 100644 index 0000000000..67be66c6af --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alert/post_alerts_responses.go @@ -0,0 +1,232 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alert + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// PostAlertsReader is a Reader for the PostAlerts structure. +type PostAlertsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PostAlertsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewPostAlertsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewPostAlertsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewPostAlertsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewPostAlertsOK creates a PostAlertsOK with default headers values +func NewPostAlertsOK() *PostAlertsOK { + return &PostAlertsOK{} +} + +/* +PostAlertsOK describes a response with status code 200, with default header values. 
+ +Create alerts response +*/ +type PostAlertsOK struct { +} + +// IsSuccess returns true when this post alerts o k response has a 2xx status code +func (o *PostAlertsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this post alerts o k response has a 3xx status code +func (o *PostAlertsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this post alerts o k response has a 4xx status code +func (o *PostAlertsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this post alerts o k response has a 5xx status code +func (o *PostAlertsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this post alerts o k response a status code equal to that given +func (o *PostAlertsOK) IsCode(code int) bool { + return code == 200 +} + +func (o *PostAlertsOK) Error() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsOK ", 200) +} + +func (o *PostAlertsOK) String() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsOK ", 200) +} + +func (o *PostAlertsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewPostAlertsBadRequest creates a PostAlertsBadRequest with default headers values +func NewPostAlertsBadRequest() *PostAlertsBadRequest { + return &PostAlertsBadRequest{} +} + +/* +PostAlertsBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type PostAlertsBadRequest struct { + Payload string +} + +// IsSuccess returns true when this post alerts bad request response has a 2xx status code +func (o *PostAlertsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this post alerts bad request response has a 3xx status code +func (o *PostAlertsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this post alerts bad request response has a 4xx status code +func (o *PostAlertsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this post alerts bad request response has a 5xx status code +func (o *PostAlertsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this post alerts bad request response a status code equal to that given +func (o *PostAlertsBadRequest) IsCode(code int) bool { + return code == 400 +} + +func (o *PostAlertsBadRequest) Error() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsBadRequest %+v", 400, o.Payload) +} + +func (o *PostAlertsBadRequest) String() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsBadRequest %+v", 400, o.Payload) +} + +func (o *PostAlertsBadRequest) GetPayload() string { + return o.Payload +} + +func (o *PostAlertsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewPostAlertsInternalServerError creates a PostAlertsInternalServerError with default headers values +func NewPostAlertsInternalServerError() *PostAlertsInternalServerError { + return &PostAlertsInternalServerError{} +} + +/* +PostAlertsInternalServerError describes a response with status code 500, with default header values. 
+ +Internal server error +*/ +type PostAlertsInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this post alerts internal server error response has a 2xx status code +func (o *PostAlertsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this post alerts internal server error response has a 3xx status code +func (o *PostAlertsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this post alerts internal server error response has a 4xx status code +func (o *PostAlertsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this post alerts internal server error response has a 5xx status code +func (o *PostAlertsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this post alerts internal server error response a status code equal to that given +func (o *PostAlertsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *PostAlertsInternalServerError) Error() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsInternalServerError %+v", 500, o.Payload) +} + +func (o *PostAlertsInternalServerError) String() string { + return fmt.Sprintf("[POST /alerts][%d] postAlertsInternalServerError %+v", 500, o.Payload) +} + +func (o *PostAlertsInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *PostAlertsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/alertgroup_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/alertgroup_client.go new file mode 100644 index 0000000000..1caf597d11 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/alertgroup_client.go @@ -0,0 +1,93 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alertgroup + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new alertgroup API client. 
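+//
+// A minimal sketch of listing alert groups (transport wiring as in the
+// sibling alert package; the receiver regex is an illustrative assumption):
+//
+//	groupAPI := New(transport, strfmt.Default)
+//	ok, err := groupAPI.GetAlertGroups(NewGetAlertGroupsParams().WithReceiver(swag.String("team-.*")))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("groups:", len(ok.Payload))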
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for alertgroup API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetAlertGroups(params *GetAlertGroupsParams, opts ...ClientOption) (*GetAlertGroupsOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetAlertGroups Get a list of alert groups +*/ +func (a *Client) GetAlertGroups(params *GetAlertGroupsParams, opts ...ClientOption) (*GetAlertGroupsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetAlertGroupsParams() + } + op := &runtime.ClientOperation{ + ID: "getAlertGroups", + Method: "GET", + PathPattern: "/alerts/groups", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetAlertGroupsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetAlertGroupsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getAlertGroups: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_parameters.go new file mode 100644 index 0000000000..1196ade8f1 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_parameters.go @@ -0,0 +1,348 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alertgroup + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetAlertGroupsParams creates a new GetAlertGroupsParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetAlertGroupsParams() *GetAlertGroupsParams { + return &GetAlertGroupsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetAlertGroupsParamsWithTimeout creates a new GetAlertGroupsParams object +// with the ability to set a timeout on a request. +func NewGetAlertGroupsParamsWithTimeout(timeout time.Duration) *GetAlertGroupsParams { + return &GetAlertGroupsParams{ + timeout: timeout, + } +} + +// NewGetAlertGroupsParamsWithContext creates a new GetAlertGroupsParams object +// with the ability to set a context for a request. +func NewGetAlertGroupsParamsWithContext(ctx context.Context) *GetAlertGroupsParams { + return &GetAlertGroupsParams{ + Context: ctx, + } +} + +// NewGetAlertGroupsParamsWithHTTPClient creates a new GetAlertGroupsParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetAlertGroupsParamsWithHTTPClient(client *http.Client) *GetAlertGroupsParams { + return &GetAlertGroupsParams{ + HTTPClient: client, + } +} + +/* +GetAlertGroupsParams contains all the parameters to send to the API endpoint + + for the get alert groups operation. + + Typically these are written to a http.Request. +*/ +type GetAlertGroupsParams struct { + + /* Active. + + Show active alerts + + Default: true + */ + Active *bool + + /* Filter. + + A list of matchers to filter alerts by + */ + Filter []string + + /* Inhibited. + + Show inhibited alerts + + Default: true + */ + Inhibited *bool + + /* Receiver. + + A regex matching receivers to filter alerts by + */ + Receiver *string + + /* Silenced. + + Show silenced alerts + + Default: true + */ + Silenced *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get alert groups params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetAlertGroupsParams) WithDefaults() *GetAlertGroupsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get alert groups params (not the query body). +// +// All values with no default are reset to their zero value. 
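+//
+// For example (sketch): fields without a schema default are cleared, so
+//
+//	p := NewGetAlertGroupsParams().WithReceiver(swag.String("team-.*"))
+//	p.SetDefaults() // Active, Inhibited, Silenced -> true; Receiver, Filter -> nil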
+func (o *GetAlertGroupsParams) SetDefaults() { + var ( + activeDefault = bool(true) + + inhibitedDefault = bool(true) + + silencedDefault = bool(true) + ) + + val := GetAlertGroupsParams{ + Active: &activeDefault, + Inhibited: &inhibitedDefault, + Silenced: &silencedDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get alert groups params +func (o *GetAlertGroupsParams) WithTimeout(timeout time.Duration) *GetAlertGroupsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get alert groups params +func (o *GetAlertGroupsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get alert groups params +func (o *GetAlertGroupsParams) WithContext(ctx context.Context) *GetAlertGroupsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get alert groups params +func (o *GetAlertGroupsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get alert groups params +func (o *GetAlertGroupsParams) WithHTTPClient(client *http.Client) *GetAlertGroupsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get alert groups params +func (o *GetAlertGroupsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithActive adds the active to the get alert groups params +func (o *GetAlertGroupsParams) WithActive(active *bool) *GetAlertGroupsParams { + o.SetActive(active) + return o +} + +// SetActive adds the active to the get alert groups params +func (o *GetAlertGroupsParams) SetActive(active *bool) { + o.Active = active +} + +// WithFilter adds the filter to the get alert groups params +func (o *GetAlertGroupsParams) WithFilter(filter []string) *GetAlertGroupsParams { + o.SetFilter(filter) + return o +} + +// SetFilter adds the filter to the get alert groups params +func (o *GetAlertGroupsParams) SetFilter(filter []string) { + o.Filter = filter +} + +// WithInhibited adds the inhibited to the get alert groups params +func (o *GetAlertGroupsParams) WithInhibited(inhibited *bool) *GetAlertGroupsParams { + o.SetInhibited(inhibited) + return o +} + +// SetInhibited adds the inhibited to the get alert groups params +func (o *GetAlertGroupsParams) SetInhibited(inhibited *bool) { + o.Inhibited = inhibited +} + +// WithReceiver adds the receiver to the get alert groups params +func (o *GetAlertGroupsParams) WithReceiver(receiver *string) *GetAlertGroupsParams { + o.SetReceiver(receiver) + return o +} + +// SetReceiver adds the receiver to the get alert groups params +func (o *GetAlertGroupsParams) SetReceiver(receiver *string) { + o.Receiver = receiver +} + +// WithSilenced adds the silenced to the get alert groups params +func (o *GetAlertGroupsParams) WithSilenced(silenced *bool) *GetAlertGroupsParams { + o.SetSilenced(silenced) + return o +} + +// SetSilenced adds the silenced to the get alert groups params +func (o *GetAlertGroupsParams) SetSilenced(silenced *bool) { + o.Silenced = silenced +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAlertGroupsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Active != nil { + + // query param active + var qrActive bool + + if o.Active != nil { + qrActive = *o.Active + } + qActive := 
swag.FormatBool(qrActive) + if qActive != "" { + + if err := r.SetQueryParam("active", qActive); err != nil { + return err + } + } + } + + if o.Filter != nil { + + // binding items for filter + joinedFilter := o.bindParamFilter(reg) + + // query array param filter + if err := r.SetQueryParam("filter", joinedFilter...); err != nil { + return err + } + } + + if o.Inhibited != nil { + + // query param inhibited + var qrInhibited bool + + if o.Inhibited != nil { + qrInhibited = *o.Inhibited + } + qInhibited := swag.FormatBool(qrInhibited) + if qInhibited != "" { + + if err := r.SetQueryParam("inhibited", qInhibited); err != nil { + return err + } + } + } + + if o.Receiver != nil { + + // query param receiver + var qrReceiver string + + if o.Receiver != nil { + qrReceiver = *o.Receiver + } + qReceiver := qrReceiver + if qReceiver != "" { + + if err := r.SetQueryParam("receiver", qReceiver); err != nil { + return err + } + } + } + + if o.Silenced != nil { + + // query param silenced + var qrSilenced bool + + if o.Silenced != nil { + qrSilenced = *o.Silenced + } + qSilenced := swag.FormatBool(qrSilenced) + if qSilenced != "" { + + if err := r.SetQueryParam("silenced", qSilenced); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindParamGetAlertGroups binds the parameter filter +func (o *GetAlertGroupsParams) bindParamFilter(formats strfmt.Registry) []string { + filterIR := o.Filter + + var filterIC []string + for _, filterIIR := range filterIR { // explode []string + + filterIIV := filterIIR // string as string + filterIC = append(filterIC, filterIIV) + } + + // items.CollectionFormat: "multi" + filterIS := swag.JoinByFormat(filterIC, "multi") + + return filterIS +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_responses.go new file mode 100644 index 0000000000..f348da944e --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertgroup/get_alert_groups_responses.go @@ -0,0 +1,244 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package alertgroup + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// GetAlertGroupsReader is a Reader for the GetAlertGroups structure. +type GetAlertGroupsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
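+//
+// Non-2xx responses are returned to the caller as typed errors; a sketch of
+// recovering one with the standard errors package:
+//
+//	_, err := groupAPI.GetAlertGroups(params)
+//	var badReq *GetAlertGroupsBadRequest
+//	if errors.As(err, &badReq) {
+//		fmt.Println("invalid request:", badReq.Payload)
+//	}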
+func (o *GetAlertGroupsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetAlertGroupsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetAlertGroupsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetAlertGroupsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetAlertGroupsOK creates a GetAlertGroupsOK with default headers values +func NewGetAlertGroupsOK() *GetAlertGroupsOK { + return &GetAlertGroupsOK{} +} + +/* +GetAlertGroupsOK describes a response with status code 200, with default header values. + +Get alert groups response +*/ +type GetAlertGroupsOK struct { + Payload models.AlertGroups +} + +// IsSuccess returns true when this get alert groups o k response has a 2xx status code +func (o *GetAlertGroupsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get alert groups o k response has a 3xx status code +func (o *GetAlertGroupsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alert groups o k response has a 4xx status code +func (o *GetAlertGroupsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get alert groups o k response has a 5xx status code +func (o *GetAlertGroupsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get alert groups o k response a status code equal to that given +func (o *GetAlertGroupsOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetAlertGroupsOK) Error() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsOK %+v", 200, o.Payload) +} + +func (o *GetAlertGroupsOK) String() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsOK %+v", 200, o.Payload) +} + +func (o *GetAlertGroupsOK) GetPayload() models.AlertGroups { + return o.Payload +} + +func (o *GetAlertGroupsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAlertGroupsBadRequest creates a GetAlertGroupsBadRequest with default headers values +func NewGetAlertGroupsBadRequest() *GetAlertGroupsBadRequest { + return &GetAlertGroupsBadRequest{} +} + +/* +GetAlertGroupsBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type GetAlertGroupsBadRequest struct { + Payload string +} + +// IsSuccess returns true when this get alert groups bad request response has a 2xx status code +func (o *GetAlertGroupsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get alert groups bad request response has a 3xx status code +func (o *GetAlertGroupsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alert groups bad request response has a 4xx status code +func (o *GetAlertGroupsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get alert groups bad request response has a 5xx status code +func (o *GetAlertGroupsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get alert groups bad request response a status code equal to that given +func (o *GetAlertGroupsBadRequest) IsCode(code int) bool { + return code == 400 +} + +func (o *GetAlertGroupsBadRequest) Error() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsBadRequest %+v", 400, o.Payload) +} + +func (o *GetAlertGroupsBadRequest) String() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsBadRequest %+v", 400, o.Payload) +} + +func (o *GetAlertGroupsBadRequest) GetPayload() string { + return o.Payload +} + +func (o *GetAlertGroupsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAlertGroupsInternalServerError creates a GetAlertGroupsInternalServerError with default headers values +func NewGetAlertGroupsInternalServerError() *GetAlertGroupsInternalServerError { + return &GetAlertGroupsInternalServerError{} +} + +/* +GetAlertGroupsInternalServerError describes a response with status code 500, with default header values. 
+ +Internal server error +*/ +type GetAlertGroupsInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this get alert groups internal server error response has a 2xx status code +func (o *GetAlertGroupsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get alert groups internal server error response has a 3xx status code +func (o *GetAlertGroupsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get alert groups internal server error response has a 4xx status code +func (o *GetAlertGroupsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get alert groups internal server error response has a 5xx status code +func (o *GetAlertGroupsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get alert groups internal server error response a status code equal to that given +func (o *GetAlertGroupsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetAlertGroupsInternalServerError) Error() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetAlertGroupsInternalServerError) String() string { + return fmt.Sprintf("[GET /alerts/groups][%d] getAlertGroupsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetAlertGroupsInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *GetAlertGroupsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/alertmanager_api_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertmanager_api_client.go new file mode 100644 index 0000000000..5b7c56e5e5 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/alertmanager_api_client.go @@ -0,0 +1,146 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/client/alert" + "github.com/prometheus/alertmanager/api/v2/client/alertgroup" + "github.com/prometheus/alertmanager/api/v2/client/general" + "github.com/prometheus/alertmanager/api/v2/client/receiver" + "github.com/prometheus/alertmanager/api/v2/client/silence" +) + +// Default alertmanager API HTTP client. 
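+//
+// A minimal sketch of configuring a non-default endpoint (the host value is
+// an illustrative assumption):
+//
+//	am := NewHTTPClientWithConfig(nil,
+//		DefaultTransportConfig().WithHost("alertmanager:9093"))
+//	_, err := am.General.GetStatus(nil)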
+var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "localhost" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/api/v2/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new alertmanager API HTTP client. +func NewHTTPClient(formats strfmt.Registry) *AlertmanagerAPI { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new alertmanager API HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *AlertmanagerAPI { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new alertmanager API client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *AlertmanagerAPI { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(AlertmanagerAPI) + cli.Transport = transport + cli.Alert = alert.New(transport, formats) + cli.Alertgroup = alertgroup.New(transport, formats) + cli.General = general.New(transport, formats) + cli.Receiver = receiver.New(transport, formats) + cli.Silence = silence.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. 
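+//
+// The With* setters return the receiver, so a full endpoint can be
+// configured in one chain (values are illustrative assumptions):
+//
+//	cfg := DefaultTransportConfig().
+//		WithHost("am.example.com").
+//		WithBasePath("/api/v2").
+//		WithSchemes([]string{"https"})
+//	am := NewHTTPClientWithConfig(strfmt.Default, cfg)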
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// AlertmanagerAPI is a client for alertmanager API +type AlertmanagerAPI struct { + Alert alert.ClientService + + Alertgroup alertgroup.ClientService + + General general.ClientService + + Receiver receiver.ClientService + + Silence silence.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *AlertmanagerAPI) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Alert.SetTransport(transport) + c.Alertgroup.SetTransport(transport) + c.General.SetTransport(transport) + c.Receiver.SetTransport(transport) + c.Silence.SetTransport(transport) +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/general/general_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/general_client.go new file mode 100644 index 0000000000..e5bade72bb --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/general_client.go @@ -0,0 +1,93 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package general + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new general API client. 
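+//
+// A minimal sketch of checking an instance with this client (transport
+// wiring as in the sibling packages):
+//
+//	statusAPI := New(transport, strfmt.Default)
+//	ok, err := statusAPI.GetStatus(NewGetStatusParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%+v\n", ok.Payload)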
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for general API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetStatus Get current status of an Alertmanager instance and its cluster +*/ +func (a *Client) GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetStatusParams() + } + op := &runtime.ClientOperation{ + ID: "getStatus", + Method: "GET", + PathPattern: "/status", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetStatusReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetStatusOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getStatus: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_parameters.go new file mode 100644 index 0000000000..cb33da4e85 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_parameters.go @@ -0,0 +1,142 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package general + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetStatusParams creates a new GetStatusParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetStatusParams() *GetStatusParams { + return &GetStatusParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetStatusParamsWithTimeout creates a new GetStatusParams object +// with the ability to set a timeout on a request. +func NewGetStatusParamsWithTimeout(timeout time.Duration) *GetStatusParams { + return &GetStatusParams{ + timeout: timeout, + } +} + +// NewGetStatusParamsWithContext creates a new GetStatusParams object +// with the ability to set a context for a request. +func NewGetStatusParamsWithContext(ctx context.Context) *GetStatusParams { + return &GetStatusParams{ + Context: ctx, + } +} + +// NewGetStatusParamsWithHTTPClient creates a new GetStatusParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetStatusParamsWithHTTPClient(client *http.Client) *GetStatusParams { + return &GetStatusParams{ + HTTPClient: client, + } +} + +/* +GetStatusParams contains all the parameters to send to the API endpoint + + for the get status operation. + + Typically these are written to a http.Request. +*/ +type GetStatusParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get status params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetStatusParams) WithDefaults() *GetStatusParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get status params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetStatusParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get status params +func (o *GetStatusParams) WithTimeout(timeout time.Duration) *GetStatusParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get status params +func (o *GetStatusParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get status params +func (o *GetStatusParams) WithContext(ctx context.Context) *GetStatusParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get status params +func (o *GetStatusParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get status params +func (o *GetStatusParams) WithHTTPClient(client *http.Client) *GetStatusParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get status params +func (o *GetStatusParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_responses.go new file mode 100644 index 0000000000..ddd6b2a2d7 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/general/get_status_responses.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package general + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// GetStatusReader is a Reader for the GetStatus structure. +type GetStatusReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetStatusOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetStatusOK creates a GetStatusOK with default headers values +func NewGetStatusOK() *GetStatusOK { + return &GetStatusOK{} +} + +/* +GetStatusOK describes a response with status code 200, with default header values. 
+ +Get status response +*/ +type GetStatusOK struct { + Payload *models.AlertmanagerStatus +} + +// IsSuccess returns true when this get status o k response has a 2xx status code +func (o *GetStatusOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get status o k response has a 3xx status code +func (o *GetStatusOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get status o k response has a 4xx status code +func (o *GetStatusOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get status o k response has a 5xx status code +func (o *GetStatusOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get status o k response a status code equal to that given +func (o *GetStatusOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetStatusOK) Error() string { + return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) +} + +func (o *GetStatusOK) String() string { + return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) +} + +func (o *GetStatusOK) GetPayload() *models.AlertmanagerStatus { + return o.Payload +} + +func (o *GetStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AlertmanagerStatus) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_parameters.go new file mode 100644 index 0000000000..b468b3c7ef --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_parameters.go @@ -0,0 +1,142 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package receiver + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetReceiversParams creates a new GetReceiversParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetReceiversParams() *GetReceiversParams { + return &GetReceiversParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetReceiversParamsWithTimeout creates a new GetReceiversParams object +// with the ability to set a timeout on a request. 
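Every generated params object follows the same fluent pattern: each With* setter returns the params value, so timeout, context, and HTTP client can be chained before the call, exactly as the GetStatusParams methods above are laid out. A brief sketch reusing the client value c from the earlier example:

// assumes: import ("context"; "log"; "time";
//   "github.com/prometheus/alertmanager/api/v2/client/general")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

params := general.NewGetStatusParams().
	WithContext(ctx).
	WithTimeout(5 * time.Second) // per-request timeout, alongside the context deadline
if _, err := c.General.GetStatus(params); err != nil {
	log.Println(err)
}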
+func NewGetReceiversParamsWithTimeout(timeout time.Duration) *GetReceiversParams { + return &GetReceiversParams{ + timeout: timeout, + } +} + +// NewGetReceiversParamsWithContext creates a new GetReceiversParams object +// with the ability to set a context for a request. +func NewGetReceiversParamsWithContext(ctx context.Context) *GetReceiversParams { + return &GetReceiversParams{ + Context: ctx, + } +} + +// NewGetReceiversParamsWithHTTPClient creates a new GetReceiversParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetReceiversParamsWithHTTPClient(client *http.Client) *GetReceiversParams { + return &GetReceiversParams{ + HTTPClient: client, + } +} + +/* +GetReceiversParams contains all the parameters to send to the API endpoint + + for the get receivers operation. + + Typically these are written to a http.Request. +*/ +type GetReceiversParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get receivers params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetReceiversParams) WithDefaults() *GetReceiversParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get receivers params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetReceiversParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get receivers params +func (o *GetReceiversParams) WithTimeout(timeout time.Duration) *GetReceiversParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get receivers params +func (o *GetReceiversParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get receivers params +func (o *GetReceiversParams) WithContext(ctx context.Context) *GetReceiversParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get receivers params +func (o *GetReceiversParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get receivers params +func (o *GetReceiversParams) WithHTTPClient(client *http.Client) *GetReceiversParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get receivers params +func (o *GetReceiversParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetReceiversParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_responses.go new file mode 100644 index 0000000000..c8fe599913 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/get_receivers_responses.go @@ -0,0 +1,110 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package receiver
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/prometheus/alertmanager/api/v2/models"
+)
+
+// GetReceiversReader is a Reader for the GetReceivers structure.
+type GetReceiversReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetReceiversReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewGetReceiversOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+	}
+}
+
+// NewGetReceiversOK creates a GetReceiversOK with default headers values
+func NewGetReceiversOK() *GetReceiversOK {
+	return &GetReceiversOK{}
+}
+
+/*
+GetReceiversOK describes a response with status code 200, with default header values.
+
+Get receivers response
+*/
+type GetReceiversOK struct {
+	Payload []*models.Receiver
+}
+
+// IsSuccess returns true when this get receivers o k response has a 2xx status code
+func (o *GetReceiversOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this get receivers o k response has a 3xx status code
+func (o *GetReceiversOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this get receivers o k response has a 4xx status code
+func (o *GetReceiversOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this get receivers o k response has a 5xx status code
+func (o *GetReceiversOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this get receivers o k response has a status code equal to that given
+func (o *GetReceiversOK) IsCode(code int) bool {
+	return code == 200
+}
+
+func (o *GetReceiversOK) Error() string {
+	return fmt.Sprintf("[GET /receivers][%d] getReceiversOK %+v", 200, o.Payload)
+}
+
+func (o *GetReceiversOK) String() string {
+	return fmt.Sprintf("[GET /receivers][%d] getReceiversOK %+v", 200, o.Payload)
+}
+
+func (o *GetReceiversOK) GetPayload() []*models.Receiver {
+	return o.Payload
+}
+
+func (o *GetReceiversOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/receiver_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/receiver_client.go
new file mode 100644
index 0000000000..22969c8d39
--- /dev/null
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/receiver/receiver_client.go
@@ -0,0 +1,93 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package receiver
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+)
+
+// New creates a new receiver API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+	return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for receiver API
+*/
+type Client struct {
+	transport runtime.ClientTransport
+	formats   strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+	GetReceivers(params *GetReceiversParams, opts ...ClientOption) (*GetReceiversOK, error)
+
+	SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetReceivers Get list of all receivers (names of notification integrations)
+*/
+func (a *Client) GetReceivers(params *GetReceiversParams, opts ...ClientOption) (*GetReceiversOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGetReceiversParams()
+	}
+	op := &runtime.ClientOperation{
+		ID:                 "getReceivers",
+		Method:             "GET",
+		PathPattern:        "/receivers",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GetReceiversReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GetReceiversOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for getReceivers: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+	a.transport = transport
+}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_parameters.go
new file mode 100644
index 0000000000..3f8d1c1435
--- /dev/null
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_parameters.go
@@ -0,0 +1,167 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeleteSilenceParams creates a new DeleteSilenceParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteSilenceParams() *DeleteSilenceParams { + return &DeleteSilenceParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteSilenceParamsWithTimeout creates a new DeleteSilenceParams object +// with the ability to set a timeout on a request. +func NewDeleteSilenceParamsWithTimeout(timeout time.Duration) *DeleteSilenceParams { + return &DeleteSilenceParams{ + timeout: timeout, + } +} + +// NewDeleteSilenceParamsWithContext creates a new DeleteSilenceParams object +// with the ability to set a context for a request. +func NewDeleteSilenceParamsWithContext(ctx context.Context) *DeleteSilenceParams { + return &DeleteSilenceParams{ + Context: ctx, + } +} + +// NewDeleteSilenceParamsWithHTTPClient creates a new DeleteSilenceParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteSilenceParamsWithHTTPClient(client *http.Client) *DeleteSilenceParams { + return &DeleteSilenceParams{ + HTTPClient: client, + } +} + +/* +DeleteSilenceParams contains all the parameters to send to the API endpoint + + for the delete silence operation. + + Typically these are written to a http.Request. +*/ +type DeleteSilenceParams struct { + + /* SilenceID. + + ID of the silence to get + + Format: uuid + */ + SilenceID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete silence params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteSilenceParams) WithDefaults() *DeleteSilenceParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete silence params (not the query body). +// +// All values with no default are reset to their zero value. 
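The silence ClientService (its client file is generated alongside these params, though it does not appear in this hunk) is assumed here to expose a DeleteSilence method mirroring GetStatus and GetReceivers above. A sketch of deleting a silence by ID, reusing the client value c; as the response reader further below shows, 404 and 500 decode into errors rather than result values:

// assumes: import ("log";
//   "github.com/go-openapi/strfmt";
//   "github.com/prometheus/alertmanager/api/v2/client/silence")
func expireSilence(id strfmt.UUID) {
	params := silence.NewDeleteSilenceParams().WithSilenceID(id)
	if _, err := c.Silence.DeleteSilence(params); err != nil {
		// A *DeleteSilenceNotFound or *DeleteSilenceInternalServerError
		// comes back through err, not through the first return value.
		log.Println("delete failed:", err)
	}
}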
+func (o *DeleteSilenceParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete silence params +func (o *DeleteSilenceParams) WithTimeout(timeout time.Duration) *DeleteSilenceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete silence params +func (o *DeleteSilenceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete silence params +func (o *DeleteSilenceParams) WithContext(ctx context.Context) *DeleteSilenceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete silence params +func (o *DeleteSilenceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete silence params +func (o *DeleteSilenceParams) WithHTTPClient(client *http.Client) *DeleteSilenceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete silence params +func (o *DeleteSilenceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSilenceID adds the silenceID to the delete silence params +func (o *DeleteSilenceParams) WithSilenceID(silenceID strfmt.UUID) *DeleteSilenceParams { + o.SetSilenceID(silenceID) + return o +} + +// SetSilenceID adds the silenceId to the delete silence params +func (o *DeleteSilenceParams) SetSilenceID(silenceID strfmt.UUID) { + o.SilenceID = silenceID +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteSilenceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param silenceID + if err := r.SetPathParam("silenceID", o.SilenceID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_responses.go new file mode 100644 index 0000000000..c8b2bba1c7 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/delete_silence_responses.go @@ -0,0 +1,222 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// DeleteSilenceReader is a Reader for the DeleteSilence structure. +type DeleteSilenceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteSilenceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDeleteSilenceOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewDeleteSilenceNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeleteSilenceInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteSilenceOK creates a DeleteSilenceOK with default headers values +func NewDeleteSilenceOK() *DeleteSilenceOK { + return &DeleteSilenceOK{} +} + +/* +DeleteSilenceOK describes a response with status code 200, with default header values. + +Delete silence response +*/ +type DeleteSilenceOK struct { +} + +// IsSuccess returns true when this delete silence o k response has a 2xx status code +func (o *DeleteSilenceOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete silence o k response has a 3xx status code +func (o *DeleteSilenceOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete silence o k response has a 4xx status code +func (o *DeleteSilenceOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete silence o k response has a 5xx status code +func (o *DeleteSilenceOK) IsServerError() bool { + return false +} + +// IsCode returns true when this delete silence o k response a status code equal to that given +func (o *DeleteSilenceOK) IsCode(code int) bool { + return code == 200 +} + +func (o *DeleteSilenceOK) Error() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceOK ", 200) +} + +func (o *DeleteSilenceOK) String() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceOK ", 200) +} + +func (o *DeleteSilenceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteSilenceNotFound creates a DeleteSilenceNotFound with default headers values +func NewDeleteSilenceNotFound() *DeleteSilenceNotFound { + return &DeleteSilenceNotFound{} +} + +/* +DeleteSilenceNotFound describes a response with status code 404, with default header values. 
+ +A silence with the specified ID was not found +*/ +type DeleteSilenceNotFound struct { +} + +// IsSuccess returns true when this delete silence not found response has a 2xx status code +func (o *DeleteSilenceNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete silence not found response has a 3xx status code +func (o *DeleteSilenceNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete silence not found response has a 4xx status code +func (o *DeleteSilenceNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete silence not found response has a 5xx status code +func (o *DeleteSilenceNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete silence not found response a status code equal to that given +func (o *DeleteSilenceNotFound) IsCode(code int) bool { + return code == 404 +} + +func (o *DeleteSilenceNotFound) Error() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceNotFound ", 404) +} + +func (o *DeleteSilenceNotFound) String() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceNotFound ", 404) +} + +func (o *DeleteSilenceNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteSilenceInternalServerError creates a DeleteSilenceInternalServerError with default headers values +func NewDeleteSilenceInternalServerError() *DeleteSilenceInternalServerError { + return &DeleteSilenceInternalServerError{} +} + +/* +DeleteSilenceInternalServerError describes a response with status code 500, with default header values. + +Internal server error +*/ +type DeleteSilenceInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this delete silence internal server error response has a 2xx status code +func (o *DeleteSilenceInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete silence internal server error response has a 3xx status code +func (o *DeleteSilenceInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete silence internal server error response has a 4xx status code +func (o *DeleteSilenceInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete silence internal server error response has a 5xx status code +func (o *DeleteSilenceInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete silence internal server error response a status code equal to that given +func (o *DeleteSilenceInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *DeleteSilenceInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteSilenceInternalServerError) String() string { + return fmt.Sprintf("[DELETE /silence/{silenceID}][%d] deleteSilenceInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteSilenceInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *DeleteSilenceInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + 
return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_parameters.go new file mode 100644 index 0000000000..fa7c23fe7c --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_parameters.go @@ -0,0 +1,167 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetSilenceParams creates a new GetSilenceParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetSilenceParams() *GetSilenceParams { + return &GetSilenceParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetSilenceParamsWithTimeout creates a new GetSilenceParams object +// with the ability to set a timeout on a request. +func NewGetSilenceParamsWithTimeout(timeout time.Duration) *GetSilenceParams { + return &GetSilenceParams{ + timeout: timeout, + } +} + +// NewGetSilenceParamsWithContext creates a new GetSilenceParams object +// with the ability to set a context for a request. +func NewGetSilenceParamsWithContext(ctx context.Context) *GetSilenceParams { + return &GetSilenceParams{ + Context: ctx, + } +} + +// NewGetSilenceParamsWithHTTPClient creates a new GetSilenceParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetSilenceParamsWithHTTPClient(client *http.Client) *GetSilenceParams { + return &GetSilenceParams{ + HTTPClient: client, + } +} + +/* +GetSilenceParams contains all the parameters to send to the API endpoint + + for the get silence operation. + + Typically these are written to a http.Request. +*/ +type GetSilenceParams struct { + + /* SilenceID. + + ID of the silence to get + + Format: uuid + */ + SilenceID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get silence params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetSilenceParams) WithDefaults() *GetSilenceParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get silence params (not the query body). +// +// All values with no default are reset to their zero value. 
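GetSilence follows the same shape as DeleteSilence: a UUID path parameter, a 200 carrying a *models.GettableSilence, and 404/500 mapped to errors by the reader below. A short sketch under the same assumptions as the delete example:

// assumes the same imports and client value c as the delete sketch above
params := silence.NewGetSilenceParams().WithSilenceID(id)
got, err := c.Silence.GetSilence(params)
if err != nil {
	log.Println(err)
	return
}
// Status.State is assumed from the vendored v2 models package
// (e.g. "active", "pending", "expired").
log.Println("silence state:", *got.Payload.Status.State)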
+func (o *GetSilenceParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get silence params +func (o *GetSilenceParams) WithTimeout(timeout time.Duration) *GetSilenceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get silence params +func (o *GetSilenceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get silence params +func (o *GetSilenceParams) WithContext(ctx context.Context) *GetSilenceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get silence params +func (o *GetSilenceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get silence params +func (o *GetSilenceParams) WithHTTPClient(client *http.Client) *GetSilenceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get silence params +func (o *GetSilenceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSilenceID adds the silenceID to the get silence params +func (o *GetSilenceParams) WithSilenceID(silenceID strfmt.UUID) *GetSilenceParams { + o.SetSilenceID(silenceID) + return o +} + +// SetSilenceID adds the silenceId to the get silence params +func (o *GetSilenceParams) SetSilenceID(silenceID strfmt.UUID) { + o.SilenceID = silenceID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetSilenceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param silenceID + if err := r.SetPathParam("silenceID", o.SilenceID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_responses.go new file mode 100644 index 0000000000..6cfdb73bf2 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silence_responses.go @@ -0,0 +1,236 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// GetSilenceReader is a Reader for the GetSilence structure. +type GetSilenceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetSilenceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetSilenceOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetSilenceNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetSilenceInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetSilenceOK creates a GetSilenceOK with default headers values +func NewGetSilenceOK() *GetSilenceOK { + return &GetSilenceOK{} +} + +/* +GetSilenceOK describes a response with status code 200, with default header values. + +Get silence response +*/ +type GetSilenceOK struct { + Payload *models.GettableSilence +} + +// IsSuccess returns true when this get silence o k response has a 2xx status code +func (o *GetSilenceOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get silence o k response has a 3xx status code +func (o *GetSilenceOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get silence o k response has a 4xx status code +func (o *GetSilenceOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get silence o k response has a 5xx status code +func (o *GetSilenceOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get silence o k response a status code equal to that given +func (o *GetSilenceOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetSilenceOK) Error() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceOK %+v", 200, o.Payload) +} + +func (o *GetSilenceOK) String() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceOK %+v", 200, o.Payload) +} + +func (o *GetSilenceOK) GetPayload() *models.GettableSilence { + return o.Payload +} + +func (o *GetSilenceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.GettableSilence) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetSilenceNotFound creates a GetSilenceNotFound with default headers values +func NewGetSilenceNotFound() *GetSilenceNotFound { + return &GetSilenceNotFound{} +} + +/* +GetSilenceNotFound describes a response with status code 404, with default header values. 
+ +A silence with the specified ID was not found +*/ +type GetSilenceNotFound struct { +} + +// IsSuccess returns true when this get silence not found response has a 2xx status code +func (o *GetSilenceNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get silence not found response has a 3xx status code +func (o *GetSilenceNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get silence not found response has a 4xx status code +func (o *GetSilenceNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get silence not found response has a 5xx status code +func (o *GetSilenceNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get silence not found response a status code equal to that given +func (o *GetSilenceNotFound) IsCode(code int) bool { + return code == 404 +} + +func (o *GetSilenceNotFound) Error() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceNotFound ", 404) +} + +func (o *GetSilenceNotFound) String() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceNotFound ", 404) +} + +func (o *GetSilenceNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetSilenceInternalServerError creates a GetSilenceInternalServerError with default headers values +func NewGetSilenceInternalServerError() *GetSilenceInternalServerError { + return &GetSilenceInternalServerError{} +} + +/* +GetSilenceInternalServerError describes a response with status code 500, with default header values. + +Internal server error +*/ +type GetSilenceInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this get silence internal server error response has a 2xx status code +func (o *GetSilenceInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get silence internal server error response has a 3xx status code +func (o *GetSilenceInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get silence internal server error response has a 4xx status code +func (o *GetSilenceInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get silence internal server error response has a 5xx status code +func (o *GetSilenceInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get silence internal server error response a status code equal to that given +func (o *GetSilenceInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetSilenceInternalServerError) Error() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceInternalServerError %+v", 500, o.Payload) +} + +func (o *GetSilenceInternalServerError) String() string { + return fmt.Sprintf("[GET /silence/{silenceID}][%d] getSilenceInternalServerError %+v", 500, o.Payload) +} + +func (o *GetSilenceInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *GetSilenceInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_parameters.go 
b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_parameters.go new file mode 100644 index 0000000000..d964efefa2 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_parameters.go @@ -0,0 +1,189 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetSilencesParams creates a new GetSilencesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetSilencesParams() *GetSilencesParams { + return &GetSilencesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetSilencesParamsWithTimeout creates a new GetSilencesParams object +// with the ability to set a timeout on a request. +func NewGetSilencesParamsWithTimeout(timeout time.Duration) *GetSilencesParams { + return &GetSilencesParams{ + timeout: timeout, + } +} + +// NewGetSilencesParamsWithContext creates a new GetSilencesParams object +// with the ability to set a context for a request. +func NewGetSilencesParamsWithContext(ctx context.Context) *GetSilencesParams { + return &GetSilencesParams{ + Context: ctx, + } +} + +// NewGetSilencesParamsWithHTTPClient creates a new GetSilencesParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetSilencesParamsWithHTTPClient(client *http.Client) *GetSilencesParams { + return &GetSilencesParams{ + HTTPClient: client, + } +} + +/* +GetSilencesParams contains all the parameters to send to the API endpoint + + for the get silences operation. + + Typically these are written to a http.Request. +*/ +type GetSilencesParams struct { + + /* Filter. + + A list of matchers to filter silences by + */ + Filter []string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get silences params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetSilencesParams) WithDefaults() *GetSilencesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get silences params (not the query body). +// +// All values with no default are reset to their zero value. 
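The Filter field above takes Alertmanager matcher strings; as bindParamFilter below shows (items.CollectionFormat "multi"), each entry is sent as its own filter= query parameter rather than being joined into one value. A sketch with hypothetical matchers, under the same assumptions as the earlier silence examples:

params := silence.NewGetSilencesParams().
	WithFilter([]string{`alertname="HighCPU"`, `env="prod"`}) // hypothetical matchers
silences, err := c.Silence.GetSilences(params)
if err != nil {
	log.Println(err)
	return
}
// Payload is models.GettableSilences, a slice of silences.
log.Printf("%d silences match", len(silences.Payload))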
+func (o *GetSilencesParams) SetDefaults() {
+	// no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get silences params
+func (o *GetSilencesParams) WithTimeout(timeout time.Duration) *GetSilencesParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the get silences params
+func (o *GetSilencesParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the get silences params
+func (o *GetSilencesParams) WithContext(ctx context.Context) *GetSilencesParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the get silences params
+func (o *GetSilencesParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get silences params
+func (o *GetSilencesParams) WithHTTPClient(client *http.Client) *GetSilencesParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get silences params
+func (o *GetSilencesParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithFilter adds the filter to the get silences params
+func (o *GetSilencesParams) WithFilter(filter []string) *GetSilencesParams {
+	o.SetFilter(filter)
+	return o
+}
+
+// SetFilter adds the filter to the get silences params
+func (o *GetSilencesParams) SetFilter(filter []string) {
+	o.Filter = filter
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetSilencesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Filter != nil {
+
+		// binding items for filter
+		joinedFilter := o.bindParamFilter(reg)
+
+		// query array param filter
+		if err := r.SetQueryParam("filter", joinedFilter...); err != nil {
+			return err
+		}
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// bindParamFilter binds the parameter filter
+func (o *GetSilencesParams) bindParamFilter(formats strfmt.Registry) []string {
+	filterIR := o.Filter
+
+	var filterIC []string
+	for _, filterIIR := range filterIR { // explode []string
+
+		filterIIV := filterIIR // string as string
+		filterIC = append(filterIC, filterIIV)
+	}
+
+	// items.CollectionFormat: "multi"
+	filterIS := swag.JoinByFormat(filterIC, "multi")
+
+	return filterIS
+}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_responses.go
new file mode 100644
index 0000000000..6578dd399e
--- /dev/null
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/get_silences_responses.go
@@ -0,0 +1,177 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package silence
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// GetSilencesReader is a Reader for the GetSilences structure. +type GetSilencesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetSilencesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetSilencesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetSilencesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetSilencesOK creates a GetSilencesOK with default headers values +func NewGetSilencesOK() *GetSilencesOK { + return &GetSilencesOK{} +} + +/* +GetSilencesOK describes a response with status code 200, with default header values. + +Get silences response +*/ +type GetSilencesOK struct { + Payload models.GettableSilences +} + +// IsSuccess returns true when this get silences o k response has a 2xx status code +func (o *GetSilencesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get silences o k response has a 3xx status code +func (o *GetSilencesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get silences o k response has a 4xx status code +func (o *GetSilencesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get silences o k response has a 5xx status code +func (o *GetSilencesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get silences o k response a status code equal to that given +func (o *GetSilencesOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetSilencesOK) Error() string { + return fmt.Sprintf("[GET /silences][%d] getSilencesOK %+v", 200, o.Payload) +} + +func (o *GetSilencesOK) String() string { + return fmt.Sprintf("[GET /silences][%d] getSilencesOK %+v", 200, o.Payload) +} + +func (o *GetSilencesOK) GetPayload() models.GettableSilences { + return o.Payload +} + +func (o *GetSilencesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetSilencesInternalServerError creates a GetSilencesInternalServerError with default headers values +func NewGetSilencesInternalServerError() *GetSilencesInternalServerError { + return &GetSilencesInternalServerError{} +} + +/* +GetSilencesInternalServerError describes a response with status code 500, with default header values. 
+ +Internal server error +*/ +type GetSilencesInternalServerError struct { + Payload string +} + +// IsSuccess returns true when this get silences internal server error response has a 2xx status code +func (o *GetSilencesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get silences internal server error response has a 3xx status code +func (o *GetSilencesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get silences internal server error response has a 4xx status code +func (o *GetSilencesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get silences internal server error response has a 5xx status code +func (o *GetSilencesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get silences internal server error response a status code equal to that given +func (o *GetSilencesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetSilencesInternalServerError) Error() string { + return fmt.Sprintf("[GET /silences][%d] getSilencesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetSilencesInternalServerError) String() string { + return fmt.Sprintf("[GET /silences][%d] getSilencesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetSilencesInternalServerError) GetPayload() string { + return o.Payload +} + +func (o *GetSilencesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_parameters.go new file mode 100644 index 0000000000..1f884c6457 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_parameters.go @@ -0,0 +1,167 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/prometheus/alertmanager/api/v2/models" +) + +// NewPostSilencesParams creates a new PostSilencesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
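Creating a silence goes through the Silence field defined below, a *models.PostableSilence. A sketch of posting a two-hour silence; the model field shapes (embedded Silence, pointer fields, Matchers) and the PostSilences method on the silence client are assumptions based on the vendored v2 models package and the generated pattern above:

// assumes: import ("log"; "time"; "github.com/go-openapi/strfmt"; "github.com/go-openapi/swag";
//   "github.com/prometheus/alertmanager/api/v2/client/silence";
//   "github.com/prometheus/alertmanager/api/v2/models")
starts := strfmt.DateTime(time.Now())
ends := strfmt.DateTime(time.Now().Add(2 * time.Hour))
sil := &models.PostableSilence{
	Silence: models.Silence{
		Matchers: models.Matchers{
			{Name: swag.String("alertname"), Value: swag.String("HighCPU"), IsRegex: swag.Bool(false)},
		},
		StartsAt:  &starts,
		EndsAt:    &ends,
		CreatedBy: swag.String("ops@example.com"), // hypothetical author
		Comment:   swag.String("maintenance window"),
	},
}
resp, err := c.Silence.PostSilences(silence.NewPostSilencesParams().WithSilence(sil))
if err != nil {
	log.Fatal(err)
}
log.Println("created silence", resp.Payload.SilenceID) // SilenceID assumed from the 200 body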
+func NewPostSilencesParams() *PostSilencesParams { + return &PostSilencesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewPostSilencesParamsWithTimeout creates a new PostSilencesParams object +// with the ability to set a timeout on a request. +func NewPostSilencesParamsWithTimeout(timeout time.Duration) *PostSilencesParams { + return &PostSilencesParams{ + timeout: timeout, + } +} + +// NewPostSilencesParamsWithContext creates a new PostSilencesParams object +// with the ability to set a context for a request. +func NewPostSilencesParamsWithContext(ctx context.Context) *PostSilencesParams { + return &PostSilencesParams{ + Context: ctx, + } +} + +// NewPostSilencesParamsWithHTTPClient creates a new PostSilencesParams object +// with the ability to set a custom HTTPClient for a request. +func NewPostSilencesParamsWithHTTPClient(client *http.Client) *PostSilencesParams { + return &PostSilencesParams{ + HTTPClient: client, + } +} + +/* +PostSilencesParams contains all the parameters to send to the API endpoint + + for the post silences operation. + + Typically these are written to a http.Request. +*/ +type PostSilencesParams struct { + + /* Silence. + + The silence to create + */ + Silence *models.PostableSilence + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the post silences params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *PostSilencesParams) WithDefaults() *PostSilencesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the post silences params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *PostSilencesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the post silences params +func (o *PostSilencesParams) WithTimeout(timeout time.Duration) *PostSilencesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the post silences params +func (o *PostSilencesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the post silences params +func (o *PostSilencesParams) WithContext(ctx context.Context) *PostSilencesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the post silences params +func (o *PostSilencesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the post silences params +func (o *PostSilencesParams) WithHTTPClient(client *http.Client) *PostSilencesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the post silences params +func (o *PostSilencesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSilence adds the silence to the post silences params +func (o *PostSilencesParams) WithSilence(silence *models.PostableSilence) *PostSilencesParams { + o.SetSilence(silence) + return o +} + +// SetSilence adds the silence to the post silences params +func (o *PostSilencesParams) SetSilence(silence *models.PostableSilence) { + o.Silence = silence +} + +// WriteToRequest writes these params to a swagger request +func (o *PostSilencesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Silence != nil { + if err := r.SetBodyParam(o.Silence); err != nil { + return 
err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_responses.go new file mode 100644 index 0000000000..9cf1f2e500 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/post_silences_responses.go @@ -0,0 +1,284 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// PostSilencesReader is a Reader for the PostSilences structure. +type PostSilencesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *PostSilencesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewPostSilencesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewPostSilencesBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewPostSilencesNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewPostSilencesOK creates a PostSilencesOK with default headers values +func NewPostSilencesOK() *PostSilencesOK { + return &PostSilencesOK{} +} + +/* +PostSilencesOK describes a response with status code 200, with default header values. 
+
+Create / update silence response
+*/
+type PostSilencesOK struct {
+	Payload *PostSilencesOKBody
+}
+
+// IsSuccess returns true when this post silences o k response has a 2xx status code
+func (o *PostSilencesOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this post silences o k response has a 3xx status code
+func (o *PostSilencesOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this post silences o k response has a 4xx status code
+func (o *PostSilencesOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this post silences o k response has a 5xx status code
+func (o *PostSilencesOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this post silences o k response has a status code equal to that given
+func (o *PostSilencesOK) IsCode(code int) bool {
+	return code == 200
+}
+
+func (o *PostSilencesOK) Error() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesOK %+v", 200, o.Payload)
+}
+
+func (o *PostSilencesOK) String() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesOK %+v", 200, o.Payload)
+}
+
+func (o *PostSilencesOK) GetPayload() *PostSilencesOKBody {
+	return o.Payload
+}
+
+func (o *PostSilencesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(PostSilencesOKBody)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewPostSilencesBadRequest creates a PostSilencesBadRequest with default headers values
+func NewPostSilencesBadRequest() *PostSilencesBadRequest {
+	return &PostSilencesBadRequest{}
+}
+
+/*
+PostSilencesBadRequest describes a response with status code 400, with default header values.
+
+Bad request
+*/
+type PostSilencesBadRequest struct {
+	Payload string
+}
+
+// IsSuccess returns true when this post silences bad request response has a 2xx status code
+func (o *PostSilencesBadRequest) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this post silences bad request response has a 3xx status code
+func (o *PostSilencesBadRequest) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this post silences bad request response has a 4xx status code
+func (o *PostSilencesBadRequest) IsClientError() bool {
+	return true
+}
+
+// IsServerError returns true when this post silences bad request response has a 5xx status code
+func (o *PostSilencesBadRequest) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this post silences bad request response has a status code equal to that given
+func (o *PostSilencesBadRequest) IsCode(code int) bool {
+	return code == 400
+}
+
+func (o *PostSilencesBadRequest) Error() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesBadRequest %+v", 400, o.Payload)
+}
+
+func (o *PostSilencesBadRequest) String() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesBadRequest %+v", 400, o.Payload)
+}
+
+func (o *PostSilencesBadRequest) GetPayload() string {
+	return o.Payload
+}
+
+func (o *PostSilencesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewPostSilencesNotFound creates a PostSilencesNotFound with default headers values
+func NewPostSilencesNotFound() *PostSilencesNotFound {
+	return &PostSilencesNotFound{}
+}
+
+/*
+PostSilencesNotFound describes a response with status code 404, with default header values.
+
+A silence with the specified ID was not found
+*/
+type PostSilencesNotFound struct {
+	Payload string
+}
+
+// IsSuccess returns true when this post silences not found response has a 2xx status code
+func (o *PostSilencesNotFound) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this post silences not found response has a 3xx status code
+func (o *PostSilencesNotFound) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this post silences not found response has a 4xx status code
+func (o *PostSilencesNotFound) IsClientError() bool {
+	return true
+}
+
+// IsServerError returns true when this post silences not found response has a 5xx status code
+func (o *PostSilencesNotFound) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this post silences not found response has a status code equal to that given
+func (o *PostSilencesNotFound) IsCode(code int) bool {
+	return code == 404
+}
+
+func (o *PostSilencesNotFound) Error() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesNotFound %+v", 404, o.Payload)
+}
+
+func (o *PostSilencesNotFound) String() string {
+	return fmt.Sprintf("[POST /silences][%d] postSilencesNotFound %+v", 404, o.Payload)
+}
+
+func (o *PostSilencesNotFound) GetPayload() string {
+	return o.Payload
+}
+
+func (o *PostSilencesNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+/*
+PostSilencesOKBody post silences o k body
+swagger:model PostSilencesOKBody
+*/
+type PostSilencesOKBody struct {
+
+	// silence ID
+	SilenceID string `json:"silenceID,omitempty"`
+}
+
+// Validate validates this post silences o k body
+func (o *PostSilencesOKBody) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this post silences o k body based on the context it is used in
+func (o *PostSilencesOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (o *PostSilencesOKBody) MarshalBinary() ([]byte, error) {
+	if o == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(o)
+}
+
+// UnmarshalBinary interface implementation
+func (o *PostSilencesOKBody) UnmarshalBinary(b []byte) error {
+	var res PostSilencesOKBody
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*o = res
+	return nil
+}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/silence_client.go b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/silence_client.go
new file mode 100644
index 0000000000..70de0179c9
--- /dev/null
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/client/silence/silence_client.go
@@ -0,0 +1,213 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package silence + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new silence API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for silence API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + DeleteSilence(params *DeleteSilenceParams, opts ...ClientOption) (*DeleteSilenceOK, error) + + GetSilence(params *GetSilenceParams, opts ...ClientOption) (*GetSilenceOK, error) + + GetSilences(params *GetSilencesParams, opts ...ClientOption) (*GetSilencesOK, error) + + PostSilences(params *PostSilencesParams, opts ...ClientOption) (*PostSilencesOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +DeleteSilence Delete a silence by its ID +*/ +func (a *Client) DeleteSilence(params *DeleteSilenceParams, opts ...ClientOption) (*DeleteSilenceOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteSilenceParams() + } + op := &runtime.ClientOperation{ + ID: "deleteSilence", + Method: "DELETE", + PathPattern: "/silence/{silenceID}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &DeleteSilenceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeleteSilenceOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deleteSilence: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetSilence Get a silence by its ID +*/ +func (a *Client) GetSilence(params *GetSilenceParams, opts ...ClientOption) (*GetSilenceOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetSilenceParams() + } + op := &runtime.ClientOperation{ + ID: "getSilence", + Method: "GET", + PathPattern: "/silence/{silenceID}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetSilenceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetSilenceOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getSilence: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetSilences Get a list of silences +*/ +func (a *Client) GetSilences(params *GetSilencesParams, opts ...ClientOption) (*GetSilencesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetSilencesParams() + } + op := &runtime.ClientOperation{ + ID: "getSilences", + Method: "GET", + PathPattern: "/silences", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetSilencesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetSilencesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getSilences: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +PostSilences Post a new silence or update an existing one +*/ +func (a *Client) PostSilences(params *PostSilencesParams, opts ...ClientOption) (*PostSilencesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPostSilencesParams() + } + op := &runtime.ClientOperation{ + ID: "postSilences", + Method: "POST", + PathPattern: "/silences", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &PostSilencesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*PostSilencesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for postSilences: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1feba62c6c..b5c8bcb395 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -475,6 +475,9 @@ type HistogramOpts struct { // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. 
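The client_golang hunks above and below thread a mockable afterFunc through HistogramOpts so that the scheduled reset of a bucket-limited native histogram can be driven from tests. For orientation, a minimal sketch of the public configuration that exercises this code path; the metric name and thresholds are illustrative, not taken from this patch:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A native (sparse) histogram with a bucket cap. When the cap is exceeded
	// before NativeHistogramMinResetDuration has elapsed, the patched code
	// additionally schedules a reset via afterFunc for when that duration is
	// reached, instead of only widening or doubling buckets.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "example_request_duration_seconds", // illustrative name
		Help:                            "Example request latency.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	h.Observe(0.042)
}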
@@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	if opts.now == nil {
 		opts.now = time.Now
 	}
-
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
+	}
 	h := &histogram{
 		desc:                            desc,
 		upperBounds:                     opts.Buckets,
@@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
 		lastResetTime:                   opts.now(),
 		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
 	}
 	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
 		h.upperBounds = DefBuckets
@@ -716,9 +722,16 @@ type histogram struct {
 	nativeHistogramMinResetDuration time.Duration
 	// lastResetTime is protected by mtx. It is also used as created timestamp.
 	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled bool
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 func (h *histogram) Desc() *Desc {
@@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
 	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
 		return
 	}
+	// One of the other strategies will happen. To undo what they will do as
+	// soon as enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
+	// if we haven't done so already.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
 	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
 		return
 	}
 	h.doubleBucketWidth(hotCounts, coldCounts)
 }
 
-// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
-// has been passed. It returns true if the histogram has been reset. The caller
-// must have locked h.mtx.
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
 func (h *histogram) maybeReset(
 	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
 ) bool {
 	// We are using the possibly mocked h.now() rather than
 	// time.Since(h.lastResetTime) to enable testing.
-	if h.nativeHistogramMinResetDuration == 0 ||
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
 		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
 		return false
 	}
@@ -906,6 +929,29 @@ func (h *histogram) maybeReset(
 	return true
 }
 
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index b3c4eca2bc..c21911f292 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb61..8c1136ceea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go new file mode 100644 index 0000000000..d8d9a6d7a2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go new file mode 100644 index 0000000000..9ba42826ad --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import dto "github.com/prometheus/client_model/go" + +// A Problem is an issue detected by a linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. +func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go index c8864b6c3f..dd29cccc30 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -16,15 +16,11 @@ package promlint import ( "errors" - "fmt" "io" - "regexp" "sort" - "strings" - - "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" ) // A Linter is a Prometheus metrics linter. It identifies issues with metric @@ -37,23 +33,8 @@ type Linter struct { // of them. r io.Reader mfs []*dto.MetricFamily -} -// A Problem is an issue detected by a Linter. -type Problem struct { - // The name of the metric indicated by this Problem. - Metric string - - // A description of the issue for this Problem. - Text string -} - -// newProblem is helper function to create a Problem. -func newProblem(mf *dto.MetricFamily, text string) Problem { - return Problem{ - Metric: mf.GetName(), - Text: text, - } + customValidations []Validation } // New creates a new Linter that reads an input stream of Prometheus metrics in @@ -72,6 +53,14 @@ func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { } } +// AddCustomValidations adds custom validations to the linter. +func (l *Linter) AddCustomValidations(vs ...Validation) { + if l.customValidations == nil { + l.customValidations = make([]Validation, 0, len(vs)) + } + l.customValidations = append(l.customValidations, vs...) +} + // Lint performs a linting pass, returning a slice of Problems indicating any // issues found in the metrics stream. The slice is sorted by metric name // and issue description. @@ -91,11 +80,11 @@ func (l *Linter) Lint() ([]Problem, error) { return nil, err } - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } } for _, mf := range l.mfs { - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } // Ensure deterministic output. @@ -110,276 +99,25 @@ func (l *Linter) Lint() ([]Problem, error) { } // lint is the entry point for linting a single metric. -func lint(mf *dto.MetricFamily) []Problem { - fns := []func(mf *dto.MetricFamily) []Problem{ - lintHelp, - lintMetricUnits, - lintCounter, - lintHistogramSummaryReserved, - lintMetricTypeInName, - lintReservedChars, - lintCamelCase, - lintUnitAbbreviations, - } - - var problems []Problem - for _, fn := range fns { - problems = append(problems, fn(mf)...) 
- } - - // TODO(mdlayher): lint rules for specific metrics types. - return problems -} - -// lintHelp detects issues related to the help text for a metric. -func lintHelp(mf *dto.MetricFamily) []Problem { +func (l *Linter) lint(mf *dto.MetricFamily) []Problem { var problems []Problem - // Expect all metrics to have help text available. - if mf.Help == nil { - problems = append(problems, newProblem(mf, "no help text")) - } - - return problems -} - -// lintMetricUnits detects issues with metric unit names. -func lintMetricUnits(mf *dto.MetricFamily) []Problem { - var problems []Problem - - unit, base, ok := metricUnits(*mf.Name) - if !ok { - // No known units detected. - return nil - } - - // Unit is already a base unit. - if unit == base { - return nil - } - - problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) - - return problems -} - -// lintCounter detects issues specific to counters, as well as patterns that should -// only be used with counters. -func lintCounter(mf *dto.MetricFamily) []Problem { - var problems []Problem - - isCounter := mf.GetType() == dto.MetricType_COUNTER - isUntyped := mf.GetType() == dto.MetricType_UNTYPED - hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") - - switch { - case isCounter && !hasTotalSuffix: - problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) - case !isUntyped && !isCounter && hasTotalSuffix: - problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) - } - - return problems -} - -// lintHistogramSummaryReserved detects when other types of metrics use names or labels -// reserved for use by histograms and/or summaries. -func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { - // These rules do not apply to untyped metrics. - t := mf.GetType() - if t == dto.MetricType_UNTYPED { - return nil - } - - var problems []Problem - - isHistogram := t == dto.MetricType_HISTOGRAM - isSummary := t == dto.MetricType_SUMMARY - - n := mf.GetName() - - if !isHistogram && strings.HasSuffix(n, "_bucket") { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) - } - - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - ln := l.GetName() - - if !isHistogram && ln == "le" { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) - } - if !isSummary && ln == "quantile" { - problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) - } - } - } - - return problems -} - -// lintMetricTypeInName detects when metric types are included in the metric name. 
-func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue + for _, fn := range defaultValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } - - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) - } - } - return problems -} - -// lintReservedChars detects colons in metric names. -func lintReservedChars(mf *dto.MetricFamily) []Problem { - var problems []Problem - if strings.Contains(mf.GetName(), ":") { - problems = append(problems, newProblem(mf, "metric names should not contain ':'")) - } - return problems -} - -var camelCase = regexp.MustCompile(`[a-z][A-Z]`) - -// lintCamelCase detects metric names and label names written in camelCase. -func lintCamelCase(mf *dto.MetricFamily) []Problem { - var problems []Problem - if camelCase.FindString(mf.GetName()) != "" { - problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) } - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - if camelCase.FindString(l.GetName()) != "" { - problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + if l.customValidations != nil { + for _, fn := range l.customValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } } } - return problems -} -// lintUnitAbbreviations detects abbreviated units in the metric name. -func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - for _, s := range unitAbbreviations { - if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { - problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) - } - } + // TODO(mdlayher): lint rules for specific metrics types. return problems } - -// metricUnits attempts to detect known unit types used as part of a metric name, -// e.g. "foo_bytes_total" or "bar_baz_milligrams". -func metricUnits(m string) (unit, base string, ok bool) { - ss := strings.Split(m, "_") - - for _, s := range ss { - if base, found := units[s]; found { - return s, base, true - } - - for _, p := range unitPrefixes { - if strings.HasPrefix(s, p) { - if base, found := units[s[len(p):]]; found { - return s, base, true - } - } - } - } - - return "", "", false -} - -// Units and their possible prefixes recognized by this library. More can be -// added over time as needed. -var ( - // map a unit to the appropriate base unit. - units = map[string]string{ - // Base units. - "amperes": "amperes", - "bytes": "bytes", - "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. - "grams": "grams", - "joules": "joules", - "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). - "meters": "meters", // Both American and international spelling permitted. - "metres": "metres", - "seconds": "seconds", - "volts": "volts", - - // Non base units. - // Time. - "minutes": "seconds", - "hours": "seconds", - "days": "seconds", - "weeks": "seconds", - // Temperature. 
- "kelvins": "kelvin", - "fahrenheit": "celsius", - "rankine": "celsius", - // Length. - "inches": "meters", - "yards": "meters", - "miles": "meters", - // Bytes. - "bits": "bytes", - // Energy. - "calories": "joules", - // Mass. - "pounds": "grams", - "ounces": "grams", - } - - unitPrefixes = []string{ - "pico", - "nano", - "micro", - "milli", - "centi", - "deci", - "deca", - "hecto", - "kilo", - "kibi", - "mega", - "mibi", - "giga", - "gibi", - "tera", - "tebi", - "peta", - "pebi", - } - - // Common abbreviations that we'd like to discourage. - unitAbbreviations = []string{ - "s", - "ms", - "us", - "ns", - "sec", - "b", - "kb", - "mb", - "gb", - "tb", - "pb", - "m", - "h", - "d", - } -) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go new file mode 100644 index 0000000000..f52ad9eab6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import ( + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint/validations" +) + +type Validation = func(mf *dto.MetricFamily) []error + +var defaultValidations = []Validation{ + validations.LintHelp, + validations.LintMetricUnits, + validations.LintCounter, + validations.LintHistogramSummaryReserved, + validations.LintMetricTypeInName, + validations.LintReservedChars, + validations.LintCamelCase, + validations.LintUnitAbbreviations, +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go new file mode 100644 index 0000000000..f2c2c3905d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. 
+func LintCounter(mf *dto.MetricFamily) []error { + var problems []error + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, errors.New(`counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`)) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go new file mode 100644 index 0000000000..bc8dbd1e16 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -0,0 +1,101 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "fmt" + "regexp" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// LintMetricUnits detects issues with metric unit names. +func LintMetricUnits(mf *dto.MetricFamily) []error { + var problems []error + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. + if unit == base { + return nil + } + + problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit)) + + return problems +} + +// LintMetricTypeInName detects when metric types are included in the metric name. +func LintMetricTypeInName(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + + for i, t := range dto.MetricType_name { + if i == int32(dto.MetricType_UNTYPED) { + continue + } + + typename := strings.ToLower(t) + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) + } + } + return problems +} + +// LintReservedChars detects colons in metric names. +func LintReservedChars(mf *dto.MetricFamily) []error { + var problems []error + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, errors.New("metric names should not contain ':'")) + } + return problems +} + +// LintCamelCase detects metric names and label names written in camelCase. 
+func LintCamelCase(mf *dto.MetricFamily) []error { + var problems []error + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// LintUnitAbbreviations detects abbreviated units in the metric name. +func LintUnitAbbreviations(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, errors.New("metric names should not contain abbreviated units")) + } + } + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go new file mode 100644 index 0000000000..1df2944689 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + + dto "github.com/prometheus/client_model/go" +) + +// LintHelp detects issues related to the help text for a metric. +func LintHelp(mf *dto.MetricFamily) []error { + var problems []error + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, errors.New("no help text")) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go new file mode 100644 index 0000000000..6564bdf366 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
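The promlint refactor above replaces the package-private lint functions with composable Validation values (func(mf *dto.MetricFamily) []error) and adds AddCustomValidations for caller-supplied rules. A minimal sketch of registering a custom rule, assuming only the API visible in this diff; the sample metric and the prefix rule are hypothetical:

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	metrics := strings.NewReader(`# HELP app_jobs_total Jobs processed.
# TYPE app_jobs_total counter
app_jobs_total 7
`)
	l := promlint.New(metrics)
	// Hypothetical house rule: every metric must carry an "app_" prefix.
	l.AddCustomValidations(func(mf *dto.MetricFamily) []error {
		if !strings.HasPrefix(mf.GetName(), "app_") {
			return []error{errors.New(`metric names should start with "app_"`)}
		}
		return nil
	})
	problems, err := l.Lint()
	if err != nil {
		panic(err)
	}
	fmt.Println(problems) // empty: the sample metric passes all checks
}

Custom rules run after the default validations and report through the same Problem type, so existing promlint-based tests keep working unchanged.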
+ +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error { + // These rules do not apply to untyped metrics. + t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []error + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go new file mode 100644 index 0000000000..967977d2b0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import "strings" + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. 
+ "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit, base string, ok bool) { + ss := strings.Split(m, "_") + + for _, s := range ss { + if base, found := units[s]; found { + return s, base, true + } + + for _, p := range unitPrefixes { + if strings.HasPrefix(s, p) { + if base, found := units[s[len(p):]]; found { + return s, base, true + } + } + } + } + + return "", "", false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 82d4a5436b..269f56435b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -47,6 +47,7 @@ import ( "github.com/davecgh/go-spew/spew" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/internal" @@ -230,6 +231,20 @@ func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) return nil, fmt.Errorf("converting reader to metric families failed: %w", err) } + // The text protocol handles empty help fields inconsistently. When + // encoding, any non-nil value, include the empty string, produces a + // "# HELP" line. But when decoding, the help field is only set to a + // non-nil value if the "# HELP" line contains a non-empty value. + // + // Because metrics in a registry always have non-nil help fields, populate + // any nil help fields in the parsed metrics with the empty string so that + // when we compare text encodings, the results are consistent. + for _, metric := range notNormalized { + if metric.Help == nil { + metric.Help = proto.String("") + } + } + return internal.NormalizeMetricFamilies(notNormalized), nil } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 0000000000..447ab8ad63 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// MetricType represents metric type values. 
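+// The values mirror the metric types defined by the OpenMetrics exposition format.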
+type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea4612..062a281856 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e3957..134767d69a 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee151445a..80df79c319 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a0e..9d8af6db74 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4f9..fa761b3529 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a13f..7e75c286b5 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. 
func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075db..46307f5721 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index b832ac9a17..c544d7e748 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -454,12 +454,19 @@ var ( OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } + // DefaultScrapeProtocols is the set of scrape protocols that will be proposed + // to scrape target, ordered by priority. DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, PrometheusText0_0_4, } - DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{ + + // DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it + // favors protobuf Prometheus exposition format. + // Used by default for certain feature-flags like + // "native-histograms" and "created-timestamp-zero-ingestion". 
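The new procfs fields are all reachable through the existing entry points: NSpids via Proc.NewStatus, Ino via Proc.FDInfo, and the RDMA transport counters via Proc.MountStats. A usage sketch with abbreviated error handling, assuming this vendored procfs version:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self()
        if err != nil {
            panic(err)
        }

        // NSpids: the process's PID in each nested PID namespace,
        // outermost first (the last entry is the container-local PID).
        if status, err := p.NewStatus(); err == nil {
            fmt.Println("NSpids:", status.NSpids)
        }

        // Ino: parsed from the new "ino:" line of /proc/self/fdinfo/0.
        if fdinfo, err := p.FDInfo("0"); err == nil {
            fmt.Println("fd 0 inode:", fdinfo.Ino)
        }

        // RDMA counters on NFS transport stats (zero for tcp/udp mounts).
        if mounts, err := p.MountStats(); err == nil {
            for _, m := range mounts {
                if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
                    fmt.Println(m.Mount, "RDMA requests:", nfs.Transport.TotalRdmaRequest)
                }
            }
        }
    }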
+ DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{ PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, @@ -1117,6 +1124,9 @@ type QueueConfig struct { MinBackoff model.Duration `yaml:"min_backoff,omitempty"` MaxBackoff model.Duration `yaml:"max_backoff,omitempty"` RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"` + + // Samples older than the limit will be dropped. + SampleAgeLimit model.Duration `yaml:"sample_age_limit,omitempty"` } // MetadataConfig is the configuration for sending metadata to remote diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md index 19b579b399..4c06608625 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/README.md +++ b/vendor/github.com/prometheus/prometheus/discovery/README.md @@ -234,6 +234,11 @@ type Config interface { type DiscovererOptions struct { Logger log.Logger + + // A registerer for the Discoverer's metrics. + Registerer prometheus.Registerer + + HTTPClientOptions []config.HTTPClientOption } ``` diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index 9dc010a09a..acc4c1efe9 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -18,6 +18,7 @@ import ( "reflect" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -42,6 +43,15 @@ type Discoverer interface { type DiscovererOptions struct { Logger log.Logger + // A registerer for the Discoverer's metrics. + // Some Discoverers may ignore this registerer and use the global one instead. + // For now this will work, because the Prometheus `main` function uses the global registry. + // However, in the future the Prometheus `main` function will be updated to not use the global registry. + // Hence, if a discoverer wants its metrics to be visible via the Prometheus executable's + // `/metrics` endpoint, it should use this explicit registerer. + // TODO(ptodev): Update this comment once the Prometheus `main` function does not use the global registry. + Registerer prometheus.Registerer + // Extra HTTP client options to expose to Discoverers. This field may be // ignored; Discoverer implementations must opt-in to reading it. HTTPClientOptions []config.HTTPClientOption diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go index 4838a89547..9b6bd6741e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go @@ -49,30 +49,14 @@ const ( namespace = "prometheus" ) -var ( - dnsSDLookupsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookups_total", - Help: "The number of DNS-SD lookups.", - }) - dnsSDLookupFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookup_failures_total", - Help: "The number of DNS-SD lookup failures.", - }) - - // DefaultSDConfig is the default DNS SD configuration. - DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(30 * time.Second), - Type: "SRV", - } -) +// DefaultSDConfig is the default DNS SD configuration. 
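For a third-party discoverer, the intended contract is to create its metrics in NewDiscoverer and register them against opts.Registerer, so they surface on the main /metrics endpoint once the global registry is no longer used. A hypothetical discoverer wired this way (every name below is illustrative, not part of Prometheus):

    package mysd

    import (
        "context"

        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/prometheus/prometheus/discovery"
        "github.com/prometheus/prometheus/discovery/targetgroup"
    )

    type Config struct{}

    func (Config) Name() string { return "my_sd" }

    func (c Config) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
        d := &Discoverer{
            logger: opts.Logger,
            lookups: prometheus.NewCounter(prometheus.CounterOpts{
                Name: "prometheus_sd_my_sd_lookups_total",
                Help: "Lookups performed by my_sd.",
            }),
        }
        // Use the manager-provided registerer, not the global registry,
        // so the counter shows up on the executable's /metrics endpoint.
        if err := opts.Registerer.Register(d.lookups); err != nil {
            return nil, err
        }
        return d, nil
    }

    type Discoverer struct {
        logger  log.Logger
        lookups prometheus.Counter
    }

    func (d *Discoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
        // ...emit target groups on up, incrementing d.lookups, until ctx ends...
        <-ctx.Done()
    }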
+var DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + Type: "SRV", +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(dnsSDLookupFailuresCount, dnsSDLookupsCount) } // SDConfig is the configuration for DNS based service discovery. @@ -88,7 +72,7 @@ func (*SDConfig) Name() string { return "dns" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger), nil + return NewDiscovery(*c, opts.Logger, opts.Registerer) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -118,16 +102,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. type Discovery struct { *refresh.Discovery - names []string - port int - qtype uint16 - logger log.Logger + names []string + port int + qtype uint16 + logger log.Logger + dnsSDLookupsCount prometheus.Counter + dnsSDLookupFailuresCount prometheus.Counter lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -151,14 +137,32 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { port: conf.Port, logger: logger, lookupFn: lookupWithSearchPath, + dnsSDLookupsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookups_total", + Help: "The number of DNS-SD lookups.", + }), + dnsSDLookupFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookup_failures_total", + Help: "The number of DNS-SD lookup failures.", + }), } + d.Discovery = refresh.NewDiscovery( - logger, - "dns", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "dns", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: prometheus.NewRegistry(), + Metrics: []prometheus.Collector{d.dnsSDLookupsCount, d.dnsSDLookupFailuresCount}, + }, ) - return d + + return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { @@ -191,9 +195,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error { response, err := d.lookupFn(name, d.qtype, d.logger) - dnsSDLookupsCount.Inc() + d.dnsSDLookupsCount.Inc() if err != nil { - dnsSDLookupFailuresCount.Inc() + d.dnsSDLookupFailuresCount.Inc() return err } diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 60b63350f5..ef6ed1f5ee 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -39,24 +39,6 @@ import ( ) var ( - fileSDReadErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", - }) - fileSDScanDuration = prometheus.NewSummary( - prometheus.SummaryOpts{ - Name: "prometheus_sd_file_scan_duration_seconds", - Help: "The duration of the File-SD scan in seconds.", - 
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }) - fileSDTimeStamp = NewTimestampCollector() - fileWatcherErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_watcher_errors_total", - Help: "The number of File-SD errors caused by filesystem watch failures.", - }) - patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) // DefaultSDConfig is the default file SD configuration. @@ -67,7 +49,6 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. @@ -81,7 +62,7 @@ func (*SDConfig) Name() string { return "file" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -187,10 +168,17 @@ type Discovery struct { // This is used to detect deleted target groups. lastRefresh map[string]int logger log.Logger + + fileSDReadErrorsCount prometheus.Counter + fileSDScanDuration prometheus.Summary + fileWatcherErrorsCount prometheus.Counter + fileSDTimeStamp *TimestampCollector + + metricRegisterer discovery.MetricRegisterer } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -200,9 +188,35 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { interval: time.Duration(conf.RefreshInterval), timestamps: make(map[string]float64), logger: logger, + fileSDReadErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }), + fileSDScanDuration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_file_scan_duration_seconds", + Help: "The duration of the File-SD scan in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + fileWatcherErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", + }), + fileSDTimeStamp: NewTimestampCollector(), } - fileSDTimeStamp.addDiscoverer(disc) - return disc + + disc.fileSDTimeStamp.addDiscoverer(disc) + + disc.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + disc.fileSDReadErrorsCount, + disc.fileSDScanDuration, + disc.fileWatcherErrorsCount, + disc.fileSDTimeStamp, + }) + + return disc, nil } // listFiles returns a list of all files that match the configured patterns. @@ -239,10 +253,17 @@ func (d *Discovery) watchFiles() { // Run implements the Discoverer interface. 
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) - fileWatcherErrorsCount.Inc() + d.fileWatcherErrorsCount.Inc() return } d.watcher = watcher @@ -306,7 +327,7 @@ func (d *Discovery) stop() { done := make(chan struct{}) defer close(done) - fileSDTimeStamp.removeDiscoverer(d) + d.fileSDTimeStamp.removeDiscoverer(d) // Closing the watcher will deadlock unless all events and errors are drained. go func() { @@ -332,13 +353,13 @@ func (d *Discovery) stop() { func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) { t0 := time.Now() defer func() { - fileSDScanDuration.Observe(time.Since(t0).Seconds()) + d.fileSDScanDuration.Observe(time.Since(t0).Seconds()) }() ref := map[string]int{} for _, p := range d.listFiles() { tgroups, err := d.readFile(p) if err != nil { - fileSDReadErrorsCount.Inc() + d.fileSDReadErrorsCount.Inc() level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) // Prevent deletion down below. diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 86439d2c95..67e326c41a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -28,48 +28,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failedConfigs = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_failed_configs", - Help: "Current number of service discovery configurations that failed to load.", - }, - []string{"name"}, - ) - discoveredTargets = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_discovered_targets", - Help: "Current number of discovered targets.", - }, - []string{"name", "config"}, - ) - receivedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_received_updates_total", - Help: "Total number of update events received from the SD providers.", - }, - []string{"name"}, - ) - delayedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_delayed_total", - Help: "Total number of update events that couldn't be sent immediately.", - }, - []string{"name"}, - ) - sentUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_total", - Help: "Total number of update events sent to the SD consumers.", - }, - []string{"name"}, - ) -) - -func RegisterMetrics() { - prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) -} - type poolKey struct { setName string provider string @@ -107,7 +65,7 @@ func (p *Provider) Config() interface{} { } // NewManager is the Discovery Manager constructor. 
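Because registration now happens at the top of Run and is undone when Run returns, a discovery instance can be rebuilt against the same registerer (for example on config reload) without duplicate-registration errors. A sketch of the updated file-SD call site, which now passes a registerer and must check the constructor error (the path and interval are illustrative):

    package example

    import (
        "context"
        "time"

        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/common/model"

        "github.com/prometheus/prometheus/discovery/file"
        "github.com/prometheus/prometheus/discovery/targetgroup"
    )

    func runFileSD(ctx context.Context, logger log.Logger, reg prometheus.Registerer) error {
        d, err := file.NewDiscovery(&file.SDConfig{
            Files:           []string{"targets/*.json"},
            RefreshInterval: model.Duration(5 * time.Minute),
        }, logger, reg)
        if err != nil {
            return err
        }
        ch := make(chan []*targetgroup.Group)
        go d.Run(ctx, ch) // registers the metrics; unregisters when it returns
        for {
            select {
            case <-ctx.Done():
                return nil
            case tgs := <-ch:
                _ = tgs // hand the groups to a scrape or notify consumer
            }
        }
    }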
-func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager { if logger == nil { logger = log.NewNopLogger() } @@ -118,10 +76,21 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager ctx: ctx, updatert: 5 * time.Second, triggerSend: make(chan struct{}, 1), + registerer: registerer, } for _, option := range options { option(mgr) } + + // Register the metrics. + // We have to do this after setting all options, so that the name of the Manager is set. + if metrics, err := NewMetrics(registerer, mgr.name); err == nil { + mgr.metrics = metrics + } else { + level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + return nil + } + return mgr } @@ -170,6 +139,11 @@ type Manager struct { // lastProvider counts providers registered during Manager's lifetime. lastProvider uint + + // A registerer for all service discovery metrics. + registerer prometheus.Registerer + + metrics *Metrics } // Providers returns the currently configured SD providers. @@ -200,7 +174,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) } - failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + m.metrics.FailedConfigs.Set(float64(failedCount)) var ( wg sync.WaitGroup @@ -230,13 +204,13 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { delete(m.targets, poolKey{s, prov.name}) - discoveredTargets.DeleteLabelValues(m.name, s) + m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, s) } } // Set metrics and targets for new subs. for s := range prov.newSubs { if _, ok := prov.subs[s]; !ok { - discoveredTargets.WithLabelValues(m.name, s).Set(0) + m.metrics.DiscoveredTargets.WithLabelValues(s).Set(0) } if l := len(refTargets); l > 0 { m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) @@ -316,7 +290,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case <-ctx.Done(): return case tgs, ok := <-updates: - receivedUpdates.WithLabelValues(m.name).Inc() + m.metrics.ReceivedUpdates.Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. @@ -349,11 +323,11 @@ func (m *Manager) sender() { case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. 
select { case <-m.triggerSend: - sentUpdates.WithLabelValues(m.name).Inc() + m.metrics.SentUpdates.Inc() select { case m.syncCh <- m.allGroups(): default: - delayedUpdates.WithLabelValues(m.name).Inc() + m.metrics.DelayedUpdates.Inc() level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: @@ -405,7 +379,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { } } for setName, v := range n { - discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) + m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } return tSets } @@ -428,6 +402,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { d, err := cfg.NewDiscoverer(DiscovererOptions{ Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, + Registerer: m.registerer, }) if err != nil { level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics.go b/vendor/github.com/prometheus/prometheus/discovery/metrics.go new file mode 100644 index 0000000000..6a60603955 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/metrics.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + clientGoRequestMetrics = &clientGoRequestMetricAdapter{} + clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} +) + +func init() { + clientGoRequestMetrics.RegisterWithK8sGoClient() + clientGoWorkloadMetrics.RegisterWithK8sGoClient() +} + +// Metrics to be used with a discovery manager. 
+type Metrics struct { + FailedConfigs prometheus.Gauge + DiscoveredTargets *prometheus.GaugeVec + ReceivedUpdates prometheus.Counter + DelayedUpdates prometheus.Counter + SentUpdates prometheus.Counter +} + +func NewMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) { + m := &Metrics{} + + m.FailedConfigs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "prometheus_sd_failed_configs", + Help: "Current number of service discovery configurations that failed to load.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DiscoveredTargets = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_discovered_targets", + Help: "Current number of discovered targets.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + []string{"config"}, + ) + + m.ReceivedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_received_updates_total", + Help: "Total number of update events received from the SD providers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DelayedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_delayed_total", + Help: "Total number of update events that couldn't be sent immediately.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.SentUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_total", + Help: "Total number of update events sent to the SD consumers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + metrics := []prometheus.Collector{ + m.FailedConfigs, + m.DiscoveredTargets, + m.ReceivedUpdates, + m.DelayedUpdates, + m.SentUpdates, + } + + for _, collector := range metrics { + err := registerer.Register(collector) + if err != nil { + return nil, fmt.Errorf("failed to register discovery manager metrics: %w", err) + } + } + + return m, nil +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go new file mode 100644 index 0000000000..f16245684b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go @@ -0,0 +1,198 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/workqueue" +) + +// This file registers metrics used by the Kubernetes Go client (k8s.io/client-go). +// Unfortunately, k8s.io/client-go metrics are global. +// If we instantiate multiple k8s SD instances, their k8s/client-go metrics will overlap. +// To prevent us from displaying misleading metrics, we register k8s.io/client-go metrics +// outside of the Kubernetes SD. 
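NewManager now takes the registerer as an argument, builds its Metrics bundle only after the options have set the manager's name, and returns nil when registration fails, so call sites should check for nil. Because the name is baked in as a const label, two managers (conventionally "scrape" and "notify") can share one registerer without their collectors colliding. A sketch:

    package example

    import (
        "context"
        "errors"

        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/prometheus/prometheus/discovery"
    )

    func newSDManagers(ctx context.Context, logger log.Logger) (scrape, notify *discovery.Manager, err error) {
        reg := prometheus.DefaultRegisterer
        // Distinct manager names become distinct "name" const labels,
        // so both metric sets can live on the same registerer.
        scrape = discovery.NewManager(ctx, logger, reg, discovery.Name("scrape"))
        notify = discovery.NewManager(ctx, logger, reg, discovery.Name("notify"))
        if scrape == nil || notify == nil {
            // NewManager already logged the registration failure.
            return nil, nil, errors.New("failed to create discovery managers")
        }
        return scrape, notify, nil
    }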
+
+const (
+	KubernetesMetricsNamespace = "prometheus_sd_kubernetes"
+	workqueueMetricsNamespace  = KubernetesMetricsNamespace + "_workqueue"
+)
+
+var (
+	// Metrics for client-go's HTTP requests.
+	clientGoRequestResultMetricVec = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: KubernetesMetricsNamespace,
+			Name:      "http_request_total",
+			Help:      "Total number of HTTP requests to the Kubernetes API by status code.",
+		},
+		[]string{"status_code"},
+	)
+	clientGoRequestLatencyMetricVec = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Namespace:  KubernetesMetricsNamespace,
+			Name:       "http_request_duration_seconds",
+			Help:       "Summary of latencies for HTTP requests to the Kubernetes API by endpoint.",
+			Objectives: map[float64]float64{},
+		},
+		[]string{"endpoint"},
+	)
+
+	// Definition of metrics for client-go workqueue metrics provider.
+	clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: workqueueMetricsNamespace,
+			Name:      "depth",
+			Help:      "Current depth of the work queue.",
+		},
+		[]string{"queue_name"},
+	)
+	clientGoWorkqueueAddsMetricVec = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: workqueueMetricsNamespace,
+			Name:      "items_total",
+			Help:      "Total number of items added to the work queue.",
+		},
+		[]string{"queue_name"},
+	)
+	clientGoWorkqueueLatencyMetricVec = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Namespace:  workqueueMetricsNamespace,
+			Name:       "latency_seconds",
+			Help:       "How long an item stays in the work queue.",
+			Objectives: map[float64]float64{},
+		},
+		[]string{"queue_name"},
+	)
+	clientGoWorkqueueUnfinishedWorkSecondsMetricVec = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: workqueueMetricsNamespace,
+			Name:      "unfinished_work_seconds",
+			Help:      "How long an item has remained unfinished in the work queue.",
+		},
+		[]string{"queue_name"},
+	)
+	clientGoWorkqueueLongestRunningProcessorMetricVec = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: workqueueMetricsNamespace,
+			Name:      "longest_running_processor_seconds",
+			Help:      "Duration of the longest running processor in the work queue.",
+		},
+		[]string{"queue_name"},
+	)
+	clientGoWorkqueueWorkDurationMetricVec = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Namespace:  workqueueMetricsNamespace,
+			Name:       "work_duration_seconds",
+			Help:       "How long processing an item from the work queue takes.",
+			Objectives: map[float64]float64{},
+		},
+		[]string{"queue_name"},
+	)
+)
+
+// Definition of dummy metric used as a placeholder if we don't want to observe some data.
+type noopMetric struct{}
+
+func (noopMetric) Inc()            {}
+func (noopMetric) Dec()            {}
+func (noopMetric) Observe(float64) {}
+func (noopMetric) Set(float64)     {}
+
+// Definition of client-go metrics adapters for HTTP requests observation.
+type clientGoRequestMetricAdapter struct{}
+
+// Returns all of the Prometheus metrics derived from k8s.io/client-go.
+// This may be used to register and unregister the metrics.
+func clientGoMetrics() []prometheus.Collector { + return []prometheus.Collector{ + clientGoRequestResultMetricVec, + clientGoRequestLatencyMetricVec, + clientGoWorkqueueDepthMetricVec, + clientGoWorkqueueAddsMetricVec, + clientGoWorkqueueLatencyMetricVec, + clientGoWorkqueueUnfinishedWorkSecondsMetricVec, + clientGoWorkqueueLongestRunningProcessorMetricVec, + clientGoWorkqueueWorkDurationMetricVec, + } +} + +func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error { + for _, collector := range clientGoMetrics() { + err := registerer.Register(collector) + if err != nil { + return fmt.Errorf("failed to register Kubernetes Go Client metrics: %w", err) + } + } + return nil +} + +func (f *clientGoRequestMetricAdapter) RegisterWithK8sGoClient() { + metrics.Register( + metrics.RegisterOpts{ + RequestLatency: f, + RequestResult: f, + }, + ) +} + +func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { + clientGoRequestResultMetricVec.WithLabelValues(code).Inc() +} + +func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) { + clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) +} + +// Definition of client-go workqueue metrics provider definition. +type clientGoWorkqueueMetricsProvider struct{} + +func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() { + workqueue.SetProvider(f) +} + +func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return clientGoWorkqueueDepthMetricVec.WithLabelValues(name) +} + +func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return clientGoWorkqueueAddsMetricVec.WithLabelValues(name) +} + +func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name) +} + +func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name) +} + +func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name) +} + +func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) +} + +func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric { + // Retries are not used so the metric is omitted. 
+ return noopMetric{} +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go index 919567a53b..0b0e5a921d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go @@ -22,29 +22,17 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failuresCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_refresh_failures_total", - Help: "Number of refresh failures for the given SD mechanism.", - }, - []string{"mechanism"}, - ) - duration = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_sd_refresh_duration_seconds", - Help: "The duration of a refresh in seconds for the given SD mechanism.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"mechanism"}, - ) -) - -func init() { - prometheus.MustRegister(duration, failuresCount) +type Options struct { + Logger log.Logger + Mech string + Interval time.Duration + RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) + Registry prometheus.Registerer + Metrics []prometheus.Collector } // Discovery implements the Discoverer interface. @@ -54,25 +42,62 @@ type Discovery struct { refreshf func(ctx context.Context) ([]*targetgroup.Group, error) failures prometheus.Counter - duration prometheus.Observer + duration prometheus.Summary + + metricRegisterer discovery.MetricRegisterer } // NewDiscovery returns a Discoverer function that calls a refresh() function at every interval. -func NewDiscovery(l log.Logger, mech string, interval time.Duration, refreshf func(ctx context.Context) ([]*targetgroup.Group, error)) *Discovery { - if l == nil { - l = log.NewNopLogger() +func NewDiscovery(opts Options) *Discovery { + var logger log.Logger + if opts.Logger == nil { + logger = log.NewNopLogger() + } else { + logger = opts.Logger + } + + d := Discovery{ + logger: logger, + interval: opts.Interval, + refreshf: opts.RefreshF, + failures: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_refresh_failures_total", + Help: "Number of refresh failures for the given SD mechanism.", + ConstLabels: prometheus.Labels{ + "mechanism": opts.Mech, + }, + }), + duration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_refresh_duration_seconds", + Help: "The duration of a refresh in seconds for the given SD mechanism.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + ConstLabels: prometheus.Labels{ + "mechanism": opts.Mech, + }, + }), } - return &Discovery{ - logger: l, - interval: interval, - refreshf: refreshf, - failures: failuresCount.WithLabelValues(mech), - duration: duration.WithLabelValues(mech), + + metrics := []prometheus.Collector{d.failures, d.duration} + if opts.Metrics != nil { + metrics = append(metrics, opts.Metrics...) } + + d.metricRegisterer = discovery.NewMetricRegisterer(opts.Registry, metrics) + + return &d } // Run implements the Discoverer interface. 
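Refresh-based discoverers now describe themselves with an Options struct: the mechanism becomes a const label on per-instance collectors instead of a label value on package-level vectors, and any extra collectors passed in Metrics share the register/unregister lifecycle. A construction sketch (the mechanism name and refresh function are illustrative):

    package example

    import (
        "context"
        "time"

        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/prometheus/prometheus/discovery/refresh"
        "github.com/prometheus/prometheus/discovery/targetgroup"
    )

    func newRefresher(
        logger log.Logger,
        reg prometheus.Registerer,
        refreshFn func(ctx context.Context) ([]*targetgroup.Group, error),
    ) *refresh.Discovery {
        return refresh.NewDiscovery(refresh.Options{
            Logger:   logger,
            Mech:     "example", // becomes the "mechanism" const label
            Interval: 30 * time.Second,
            RefreshF: refreshFn,
            Registry: reg,
            // Metrics: extra collectors to register and unregister
            // alongside the built-in failure counter and duration summary.
        })
    }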
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + // Get an initial set right away. tgs, err := d.refresh(ctx) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/discovery/util.go b/vendor/github.com/prometheus/prometheus/discovery/util.go new file mode 100644 index 0000000000..83cc640dd9 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/util.go @@ -0,0 +1,72 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +// A utility to be used by implementations of discovery.Discoverer +// which need to manage the lifetime of their metrics. +type MetricRegisterer interface { + RegisterMetrics() error + UnregisterMetrics() +} + +// metricRegistererImpl is an implementation of MetricRegisterer. +type metricRegistererImpl struct { + reg prometheus.Registerer + metrics []prometheus.Collector +} + +var _ MetricRegisterer = &metricRegistererImpl{} + +// Creates an instance of a MetricRegisterer. +// Typically called inside the implementation of the NewDiscoverer() method. +func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { + return &metricRegistererImpl{ + reg: reg, + metrics: metrics, + } +} + +// RegisterMetrics registers the metrics with a Prometheus registerer. +// If any metric fails to register, it will unregister all metrics that +// were registered so far, and return an error. +// Typically called at the start of the SD's Run() method. +func (rh *metricRegistererImpl) RegisterMetrics() error { + for _, collector := range rh.metrics { + err := rh.reg.Register(collector) + if err != nil { + // Unregister all metrics that were registered so far. + // This is so that if RegisterMetrics() gets called again, + // there will not be an error due to a duplicate registration. + rh.UnregisterMetrics() + + return fmt.Errorf("failed to register metric: %w", err) + } + } + return nil +} + +// UnregisterMetrics unregisters the metrics from the same Prometheus +// registerer which was used to register them. +// Typically called at the end of the SD's Run() method by a defer statement. 
+func (rh *metricRegistererImpl) UnregisterMetrics() { + for _, collector := range rh.metrics { + rh.reg.Unregister(collector) + } +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 1c2f3ebeca..64c7b797a1 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -231,11 +231,8 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // resulting histogram might have buckets with a population of zero or directly // adjacent spans (offset=0). To normalize those, call the Compact method. // -// The method reconciles differences in the zero threshold and in the schema, -// but the schema of the other histogram must be ≥ the schema of the receiving -// histogram (i.e. must have an equal or higher resolution). This means that the -// schema of the receiving histogram won't change. Its zero threshold, however, -// will change if needed. The other histogram will not be modified in any case. +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. // // This method returns a pointer to the receiving histogram for convenience. func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { @@ -269,21 +266,30 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Sum += other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot add histogram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -296,21 +302,29 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Sum -= other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans 
= h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot subtract histigram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -765,8 +779,9 @@ func (h *FloatHistogram) floatBucketIterator( schema: h.Schema, positive: positive, }, - targetSchema: targetSchema, - absoluteStartValue: absoluteStartValue, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, } if positive { i.spans = h.PositiveSpans @@ -824,55 +839,83 @@ func (i *floatBucketIterator) Next() bool { return false } - // Copy all of these into local variables so that we can forward to the - // next bucket and then roll back if needed. - origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan - span := i.spans[spansIdx] - firstPass := true - i.currCount = 0 - -mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. - for { + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] if i.bucketsIdx == 0 { // Seed origIdx for the first bucket. - origIdx = span.Offset + i.currIdx = span.Offset } else { - origIdx++ + i.currIdx++ } - for idxInSpan >= span.Length { + + for i.idxInSpan >= span.Length { // We have exhausted the current span and have to find a new // one. We even handle pathologic spans of length 0 here. - idxInSpan = 0 - spansIdx++ - if spansIdx >= len(i.spans) { - if firstPass { - return false - } - break mergeLoop + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false } - span = i.spans[spansIdx] - origIdx += span.Offset + span = i.spans[i.spansIdx] + i.currIdx += span.Offset } - currIdx := i.targetIdx(origIdx) - switch { - case firstPass: - i.currIdx = currIdx - firstPass = false - case currIdx != i.currIdx: - // Reached next bucket in targetSchema. - // Do not actually forward to the next bucket, but break out. 
- break mergeLoop - } - i.currCount += i.buckets[i.bucketsIdx] - idxInSpan++ + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ i.bucketsIdx++ - i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan - if i.schema == i.targetSchema { - // Don't need to test the next bucket for mergeability - // if we have no schema change anyway. - break mergeLoop + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } } } + // Skip buckets before absoluteStartValue. // TODO(beorn7): Maybe do something more efficient than this recursive call. if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { @@ -882,17 +925,6 @@ mergeLoop: // Merge together all buckets from the original schema that fall into return true } -// targetIdx returns the bucket index within i.targetSchema for the given bucket -// index within i.schema. -func (i *floatBucketIterator) targetIdx(idx int32) int32 { - if i.schema == i.targetSchema { - // Fast path for the common case. The below would yield the same - // result, just with more effort. - return idx - } - return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 -} - type reverseFloatBucketIterator struct { baseBucketIterator[float64, float64] idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go index fb0185a638..f4d292b344 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -282,50 +282,49 @@ func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { return h } -// ToFloat returns a FloatHistogram representation of the Histogram. It is a -// deep copy (e.g. spans are not shared). 
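With reduceResolution available in both directions, Add and Sub no longer panic when the other histogram is coarser; instead, the receiver is reduced to the coarser schema first. A minimal sketch using bucket-free histograms (all values illustrative):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        a := &histogram.FloatHistogram{
            Schema:        2, // finer resolution
            ZeroThreshold: 1e-128,
            ZeroCount:     2,
            Count:         2,
            Sum:           10,
        }
        b := &histogram.FloatHistogram{
            Schema:        0, // coarser resolution
            ZeroThreshold: 1e-128,
            ZeroCount:     3,
            Count:         3,
            Sum:           5,
        }
        a.Add(b) // previously a panic; now a is reduced to schema 0 first
        fmt.Println(a.Schema, a.Count, a.Sum) // 0 5 15
    }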
-func (h *Histogram) ToFloat() *FloatHistogram { - var ( - positiveSpans, negativeSpans []Span - positiveBuckets, negativeBuckets []float64 - ) - if len(h.PositiveSpans) != 0 { - positiveSpans = make([]Span, len(h.PositiveSpans)) - copy(positiveSpans, h.PositiveSpans) - } - if len(h.NegativeSpans) != 0 { - negativeSpans = make([]Span, len(h.NegativeSpans)) - copy(negativeSpans, h.NegativeSpans) - } - if len(h.PositiveBuckets) != 0 { - positiveBuckets = make([]float64, len(h.PositiveBuckets)) - var current float64 - for i, b := range h.PositiveBuckets { - current += float64(b) - positiveBuckets[i] = current - } - } - if len(h.NegativeBuckets) != 0 { - negativeBuckets = make([]float64, len(h.NegativeBuckets)) - var current float64 - for i, b := range h.NegativeBuckets { - current += float64(b) - negativeBuckets[i] = current - } - } +// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep +// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an +// argument whose memory will be reused and overwritten if provided. If this +// argument is nil, a new FloatHistogram will be allocated. +func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} + } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive + } + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + + return fh +} - return &FloatHistogram{ - CounterResetHint: h.CounterResetHint, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) } + return items[:n] } // Validate validates consistency between span and bucket slices. Also, buckets are checked diff --git a/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go index f227af0b95..f6f2827a46 100644 --- a/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go +++ b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go @@ -13,11 +13,11 @@ package metadata -import "github.com/prometheus/prometheus/model/textparse" +import "github.com/prometheus/common/model" // Metadata stores a series' metadata information. 
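The new destination parameter lets hot paths convert a stream of integer histograms through one reusable FloatHistogram, while passing nil preserves the old allocate-per-call behavior. A usage sketch:

    package example

    import "github.com/prometheus/prometheus/model/histogram"

    func sumCounts(hs []*histogram.Histogram) float64 {
        var (
            total float64
            fh    *histogram.FloatHistogram // reused as scratch space
        )
        for _, h := range hs {
            // nil on the first pass allocates; later passes overwrite fh.
            fh = h.ToFloat(fh)
            total += fh.Count
        }
        return total
    }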
type Metadata struct { - Type textparse.MetricType + Type model.MetricType Unit string Help string } diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index fadf35b867..d29c3d07ae 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -108,6 +108,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Regex.Regexp == nil { c.Regex = MustNewRegexp("") } + return c.Validate() +} + +func (c *Config) Validate() error { if c.Action == "" { return fmt.Errorf("relabel action cannot be empty") } @@ -117,7 +121,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { + if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { @@ -264,12 +274,11 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { } target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) if !target.IsValid() { - lb.Del(cfg.TargetLabel) break } res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes) if len(res) == 0 { - lb.Del(cfg.TargetLabel) + lb.Del(string(target)) break } lb.Set(string(target), string(res)) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 2f5fdbc3bf..3a363ebfbc 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -16,7 +16,7 @@ package textparse import ( "mime" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -44,7 +44,7 @@ type Parser interface { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. - Type() ([]byte, MetricType) + Type() ([]byte, model.MetricType) // Unit returns the metric name and unit in the current entry. // Must only be called after Next returned a unit entry. 
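Because the checks now live in the exported Validate method, relabel rules built in code rather than unmarshaled from YAML can be vetted against the same rules, including the new target_label handling that distinguishes literal label names from "$"-template expansions. A sketch (the rule itself is illustrative):

    package example

    import (
        "github.com/prometheus/common/model"

        "github.com/prometheus/prometheus/model/relabel"
    )

    func hostRule() (relabel.Config, error) {
        cfg := relabel.Config{
            SourceLabels: model.LabelNames{"__address__"},
            Regex:        relabel.MustNewRegexp(`(.+):\d+`),
            Action:       relabel.Replace,
            TargetLabel:  "host", // no "$", so it must be a valid label name
            Replacement:  "$1",
            Separator:    ";",
        }
        if err := cfg.Validate(); err != nil {
            return relabel.Config{}, err
        }
        return cfg, nil
    }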
@@ -66,10 +66,10 @@ type Parser interface { // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool - // CreatedTimestamp writes the created timestamp of the current sample - // into the passed timestamp. It returns false if no created timestamp - // exists or if the metric type does not support created timestamps. - CreatedTimestamp(ct *types.Timestamp) bool + // CreatedTimestamp returns the created timestamp (in milliseconds) for the + // current sample. It returns nil if it is unknown e.g. if it wasn't set, + // if the scrape protocol or metric type does not support created timestamps. + CreatedTimestamp() *int64 // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. @@ -111,17 +111,3 @@ const ( EntryUnit Entry = 4 EntryHistogram Entry = 5 // A series with a native histogram as a value. ) - -// MetricType represents metric type values. -type MetricType string - -const ( - MetricTypeCounter = MetricType("counter") - MetricTypeGauge = MetricType("gauge") - MetricTypeHistogram = MetricType("histogram") - MetricTypeGaugeHistogram = MetricType("gaugehistogram") - MetricTypeSummary = MetricType("summary") - MetricTypeInfo = MetricType("info") - MetricTypeStateset = MetricType("stateset") - MetricTypeUnknown = MetricType("unknown") -) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index bb50755441..4c15ff5fc0 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -24,7 +24,7 @@ import ( "strings" "unicode/utf8" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -76,7 +76,7 @@ type OpenMetricsParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 ts int64 hasTS bool @@ -128,7 +128,7 @@ func (p *OpenMetricsParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *OpenMetricsParser) Type() ([]byte, MetricType) { +func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -136,7 +136,6 @@ func (p *OpenMetricsParser) Type() ([]byte, MetricType) { // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. func (p *OpenMetricsParser) Unit() ([]byte, []byte) { - // The Prometheus format does not have units. return p.l.b[p.offsets[0]:p.offsets[1]], p.text } @@ -213,9 +212,10 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns false because OpenMetricsParser does not support created timestamps (yet). -func (p *OpenMetricsParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *OpenMetricsParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the openMetricsLexer. 
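[Editor's note] A hedged sketch of consuming the updated Parser contract above: Type now hands back the relocated model.MetricType, and CreatedTimestamp yields a nilable Unix-millisecond value instead of filling a protobuf Timestamp. Construction of the parser is assumed to happen elsewhere:

    import (
        "errors"
        "io"

        "github.com/prometheus/common/model"
        "github.com/prometheus/prometheus/model/textparse"
    )

    func collectTypes(p textparse.Parser) (map[string]model.MetricType, error) {
        types := map[string]model.MetricType{}
        for {
            entry, err := p.Next()
            if errors.Is(err, io.EOF) {
                return types, nil
            }
            if err != nil {
                return nil, err
            }
            switch entry {
            case textparse.EntryType:
                name, t := p.Type()
                types[string(name)] = t // copy the name: the bytes die on the next Next()
            case textparse.EntrySeries:
                if ctMs := p.CreatedTimestamp(); ctMs != nil {
                    _ = *ctMs // created timestamp in Unix milliseconds, when the parser knows it
                }
            }
        }
    }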
@@ -273,21 +273,21 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "gaugehistogram": - p.mtype = MetricTypeGaugeHistogram + p.mtype = model.MetricTypeGaugeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "info": - p.mtype = MetricTypeInfo + p.mtype = model.MetricTypeInfo case "stateset": - p.mtype = MetricTypeStateset + p.mtype = model.MetricTypeStateset case "unknown": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index b3fa2d8a6d..7123e52c33 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -26,7 +26,7 @@ import ( "unicode/utf8" "unsafe" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -148,7 +148,7 @@ type PromParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 ts int64 hasTS bool @@ -192,7 +192,7 @@ func (p *PromParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *PromParser) Type() ([]byte, MetricType) { +func (p *PromParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -247,9 +247,10 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } -// CreatedTimestamp returns false because PromParser does not support created timestamps. -func (p *PromParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *PromParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the promlexer. 
It skips over tabs @@ -306,15 +307,15 @@ func (p *PromParser) Next() (Entry, error) { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "untyped": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index 23afb5c596..2c9c384b6b 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -252,27 +252,28 @@ func (p *ProtobufParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *ProtobufParser) Type() ([]byte, MetricType) { +func (p *ProtobufParser) Type() ([]byte, model.MetricType) { n := p.metricBytes.Bytes() switch p.mf.GetType() { case dto.MetricType_COUNTER: - return n, MetricTypeCounter + return n, model.MetricTypeCounter case dto.MetricType_GAUGE: - return n, MetricTypeGauge + return n, model.MetricTypeGauge case dto.MetricType_HISTOGRAM: - return n, MetricTypeHistogram + return n, model.MetricTypeHistogram case dto.MetricType_GAUGE_HISTOGRAM: - return n, MetricTypeGaugeHistogram + return n, model.MetricTypeGaugeHistogram case dto.MetricType_SUMMARY: - return n, MetricTypeSummary + return n, model.MetricTypeSummary } - return n, MetricTypeUnknown + return n, model.MetricTypeUnknown } -// Unit always returns (nil, nil) because units aren't supported by the protobuf -// format. +// Unit returns the metric unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Unit() ([]byte, []byte) { - return nil, nil + return p.metricBytes.Bytes(), []byte(p.mf.GetUnit()) } // Comment always returns nil because comments aren't supported by the protobuf @@ -360,22 +361,26 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return true } -func (p *ProtobufParser) CreatedTimestamp(ct *types.Timestamp) bool { - var foundCT *types.Timestamp +// CreatedTimestamp returns CT or nil if CT is not present or +// invalid (as timestamp e.g. negative value) on counters, summaries or histograms. 
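+// Editor's note (annotation, not part of the upstream patch): "CT" is the
+// created timestamp. The returned value is Unix milliseconds, and nil covers
+// both "no created timestamp present" and "timestamp fails proto conversion",
+// so callers get a single absent case to handle.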
+func (p *ProtobufParser) CreatedTimestamp() *int64 { + var ct *types.Timestamp switch p.mf.GetType() { case dto.MetricType_COUNTER: - foundCT = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() case dto.MetricType_SUMMARY: - foundCT = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - foundCT = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() default: } - if foundCT == nil { - return false + ctAsTime, err := types.TimestampFromProto(ct) + if err != nil { + // Errors means ct == nil or invalid timestamp, which we silently ignore. + return nil } - *ct = *foundCT - return true + ctMilis := ctAsTime.UnixMilli() + return &ctMilis } // Next advances the parser to the next "sample" (emulating the behavior of a @@ -418,6 +423,16 @@ func (p *ProtobufParser) Next() (Entry, error) { default: return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) } + unit := p.mf.GetUnit() + if len(unit) > 0 { + if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name) + } + } else if !strings.HasSuffix(name, unit) || len(name) < len(unit)+1 || name[len(name)-len(unit)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name) + } + } p.metricBytes.Reset() p.metricBytes.WriteString(name) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index e6623e9e19..702ee62fce 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -878,6 +878,7 @@ type MetricFamily struct { Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -944,6 +945,13 @@ func (m *MetricFamily) GetMetric() []Metric { return nil } +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") @@ -965,67 +973,68 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 960 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xee, 0xd6, 0xbf, 0x7b, 0x1c, 0x27, 0x9b, 0xc1, 0xaa, 0x56, 0x81, 0xc4, 0x66, 0x25, 0xa4, - 0x80, 0x90, 0x2d, 0xa0, 0x08, 0x54, 0x8a, 0x44, 0xd2, 0xa6, 0x2e, 0x2a, 0x6e, 0xcb, 0xd8, 0xbe, - 0x28, 0x37, 0xab, 0xb1, 0x3d, 0x59, 0xaf, 0xd8, 0xdd, 0x59, 0xf6, 
0xa7, 0x22, 0xdc, 0xf3, 0x0c, - 0xbc, 0x00, 0x17, 0x3c, 0x05, 0x97, 0xa8, 0x97, 0x5c, 0x71, 0x89, 0x50, 0x9e, 0x04, 0xcd, 0xdf, - 0xae, 0x53, 0xad, 0x03, 0x81, 0xbb, 0x99, 0xcf, 0xdf, 0x39, 0xf3, 0x9d, 0x6f, 0xc6, 0xe7, 0x2c, - 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x69, 0xb6, 0xa6, 0x79, 0x3a, 0x5a, 0x06, 0x3e, 0x8d, 0xb2, - 0x51, 0x48, 0xb3, 0xc4, 0x5f, 0xa6, 0xc3, 0x38, 0x61, 0x19, 0x43, 0x3d, 0x9f, 0x0d, 0x4b, 0xce, - 0x50, 0x72, 0x0e, 0x7a, 0x1e, 0xf3, 0x98, 0x20, 0x8c, 0xf8, 0x4a, 0x72, 0x0f, 0xfa, 0x1e, 0x63, - 0x5e, 0x40, 0x47, 0x62, 0xb7, 0xc8, 0xcf, 0x47, 0x99, 0x1f, 0xd2, 0x34, 0x23, 0x61, 0x2c, 0x09, - 0xce, 0xc7, 0x60, 0x7e, 0x45, 0x16, 0x34, 0x78, 0x4e, 0xfc, 0x04, 0x21, 0xa8, 0x47, 0x24, 0xa4, - 0xb6, 0x31, 0x30, 0x8e, 0x4d, 0x2c, 0xd6, 0xa8, 0x07, 0x8d, 0x97, 0x24, 0xc8, 0xa9, 0x7d, 0x5b, - 0x80, 0x72, 0xe3, 0x1c, 0x42, 0x63, 0x4c, 0x72, 0x6f, 0xe3, 0x67, 0x1e, 0x63, 0xe8, 0x9f, 0x7f, - 0x36, 0xa0, 0xf5, 0x80, 0xe5, 0x51, 0x46, 0x93, 0x6a, 0x06, 0xba, 0x07, 0x6d, 0xfa, 0x3d, 0x0d, - 0xe3, 0x80, 0x24, 0x22, 0x73, 0xe7, 0xc3, 0xa3, 0x61, 0x55, 0x5d, 0xc3, 0x33, 0xc5, 0xc2, 0x05, - 0x1f, 0x8d, 0x61, 0x7f, 0x99, 0x50, 0x92, 0xd1, 0x95, 0x5b, 0x94, 0x63, 0xd7, 0x44, 0x92, 0x83, - 0xa1, 0x2c, 0x78, 0xa8, 0x0b, 0x1e, 0xce, 0x34, 0x03, 0x5b, 0x2a, 0xa8, 0x40, 0x9c, 0xfb, 0xd0, - 0xfe, 0x3a, 0x27, 0x51, 0xe6, 0x07, 0x14, 0x1d, 0x40, 0xfb, 0x3b, 0xb5, 0x56, 0x4a, 0x8b, 0xfd, - 0x55, 0x0f, 0x8a, 0x22, 0xff, 0x30, 0xa0, 0x35, 0xcd, 0xc3, 0x90, 0x24, 0x17, 0xe8, 0x6d, 0xd8, - 0x49, 0x49, 0x18, 0x07, 0xd4, 0x5d, 0xf2, 0xb2, 0x45, 0x86, 0x3a, 0xee, 0x48, 0x4c, 0x38, 0x81, - 0x0e, 0x01, 0x14, 0x25, 0xcd, 0x43, 0x95, 0xc9, 0x94, 0xc8, 0x34, 0x0f, 0xd1, 0x17, 0x1b, 0xe7, - 0xd7, 0x06, 0xb5, 0xed, 0x86, 0x68, 0xc5, 0xa7, 0xf5, 0x57, 0x7f, 0xf6, 0x6f, 0x6d, 0xa8, 0xac, - 0xb4, 0xa5, 0xfe, 0x1f, 0x6c, 0xe9, 0x43, 0x6b, 0x1e, 0x65, 0x17, 0x31, 0x5d, 0x6d, 0xb9, 0xde, - 0x5f, 0x1b, 0x60, 0x3e, 0xf6, 0xd3, 0x8c, 0x79, 0x09, 0x09, 0xff, 0x4d, 0xed, 0xef, 0x03, 0xda, - 0xa4, 0xb8, 0xe7, 0x01, 0x23, 0x99, 0xd0, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x88, 0xe3, 0xff, 0xe4, - 0xd4, 0x3d, 0x68, 0x2e, 0xf2, 0xe5, 0xb7, 0x34, 0x53, 0x3e, 0xbd, 0x55, 0xed, 0xd3, 0xa9, 0xe0, - 0x28, 0x97, 0x54, 0x44, 0xb5, 0x47, 0x7b, 0x37, 0xf7, 0x08, 0xdd, 0x81, 0x66, 0xba, 0x5c, 0xd3, - 0x90, 0xd8, 0x8d, 0x81, 0x71, 0xbc, 0x8f, 0xd5, 0x0e, 0xbd, 0x03, 0xbb, 0x3f, 0xd0, 0x84, 0xb9, - 0xd9, 0x3a, 0xa1, 0xe9, 0x9a, 0x05, 0x2b, 0xbb, 0x29, 0xf4, 0x77, 0x39, 0x3a, 0xd3, 0x20, 0x2f, - 0x51, 0xd0, 0xa4, 0x63, 0x2d, 0xe1, 0x98, 0xc9, 0x11, 0xe9, 0xd7, 0x31, 0x58, 0xe5, 0xcf, 0xca, - 0xad, 0xb6, 0xc8, 0xb3, 0x5b, 0x90, 0xa4, 0x57, 0x4f, 0xa0, 0x1b, 0x51, 0x8f, 0x64, 0xfe, 0x4b, - 0xea, 0xa6, 0x31, 0x89, 0x6c, 0x53, 0x78, 0x32, 0xb8, 0xce, 0x93, 0x69, 0x4c, 0x22, 0xe5, 0xcb, - 0x8e, 0x0e, 0xe6, 0x18, 0x17, 0x5f, 0x24, 0x5b, 0xd1, 0x20, 0x23, 0x36, 0x0c, 0x6a, 0xc7, 0x08, - 0x17, 0x47, 0x3c, 0xe4, 0xe0, 0x15, 0x9a, 0x2c, 0xa0, 0x33, 0xa8, 0xf1, 0x1a, 0x35, 0x2a, 0x8b, - 0x78, 0x02, 0xdd, 0x98, 0xa5, 0x7e, 0x29, 0x6d, 0xe7, 0x66, 0xd2, 0x74, 0xb0, 0x96, 0x56, 0x24, - 0x93, 0xd2, 0xba, 0x52, 0x9a, 0x46, 0x0b, 0x69, 0x05, 0x4d, 0x4a, 0xdb, 0x95, 0xd2, 0x34, 0x2a, - 0xa4, 0x39, 0xbf, 0x19, 0xd0, 0x94, 0x07, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x96, - 0x23, 0x5f, 0xf0, 0x5e, 0x89, 0xcb, 0x82, 0xee, 0xc2, 0x9d, 0xd7, 0xa9, 0x57, 0x5e, 0x72, 0xef, - 0xb5, 0x00, 0x79, 0x43, 0x7d, 0xe8, 0xe4, 0x71, 0x4c, 0x13, 0x77, 0xc1, 0xf2, 0x68, 0xa5, 0x9e, - 0x33, 0x08, 0xe8, 0x94, 0x23, 0x57, 0x5a, 0x61, 0xed, 0x66, 0xad, 0xd0, 0xb9, 0x0f, 0x50, 
0x1a, - 0xc7, 0x1f, 0x25, 0x3b, 0x3f, 0x4f, 0xa9, 0xac, 0x60, 0x1f, 0xab, 0x1d, 0xc7, 0x03, 0x1a, 0x79, - 0xd9, 0x5a, 0x9c, 0xde, 0xc5, 0x6a, 0xe7, 0xfc, 0x64, 0x40, 0x5b, 0x27, 0x45, 0x9f, 0x41, 0x23, - 0xe0, 0x93, 0xc0, 0x36, 0xc4, 0x35, 0xf5, 0xab, 0x35, 0x14, 0xc3, 0x42, 0xdd, 0x92, 0x8c, 0xa9, - 0xee, 0x90, 0xe8, 0x53, 0x30, 0x6f, 0xd2, 0xa0, 0x4b, 0xb2, 0xf3, 0x63, 0x0d, 0x9a, 0x13, 0x31, - 0xf5, 0xfe, 0x9f, 0xae, 0x0f, 0xa0, 0xe1, 0xf1, 0x39, 0xa5, 0x66, 0xcc, 0x9b, 0xd5, 0xc1, 0x62, - 0x94, 0x61, 0xc9, 0x44, 0x9f, 0x40, 0x6b, 0x29, 0x47, 0x97, 0x92, 0x7c, 0x58, 0x1d, 0xa4, 0xe6, - 0x1b, 0xd6, 0x6c, 0x1e, 0x98, 0xca, 0x71, 0xa0, 0xba, 0xee, 0x96, 0x40, 0x35, 0x33, 0xb0, 0x66, - 0xf3, 0xc0, 0x5c, 0xf6, 0x5b, 0xd1, 0x4c, 0xb6, 0x06, 0xaa, 0xa6, 0x8c, 0x35, 0x1b, 0x7d, 0x0e, - 0xe6, 0x5a, 0xb7, 0x61, 0xd1, 0x44, 0xb6, 0xda, 0x53, 0x74, 0x6b, 0x5c, 0x46, 0xf0, 0xc6, 0x5d, - 0x38, 0xee, 0x86, 0xa9, 0xe8, 0x54, 0x35, 0xdc, 0x29, 0xb0, 0x49, 0xea, 0xfc, 0x62, 0xc0, 0x8e, - 0xbc, 0x87, 0x47, 0x24, 0xf4, 0x83, 0x8b, 0xca, 0x4f, 0x04, 0x04, 0xf5, 0x35, 0x0d, 0x62, 0xf5, - 0x85, 0x20, 0xd6, 0xe8, 0x2e, 0xd4, 0xb9, 0x46, 0x61, 0xe1, 0xee, 0xb6, 0xff, 0xbc, 0xcc, 0x3c, - 0xbb, 0x88, 0x29, 0x16, 0x6c, 0xde, 0xda, 0xe5, 0xb7, 0x8e, 0x5d, 0xbf, 0xae, 0xb5, 0xcb, 0x38, - 0xdd, 0xda, 0x65, 0xc4, 0x7b, 0x0b, 0x80, 0x32, 0x1f, 0xea, 0x40, 0xeb, 0xc1, 0xb3, 0xf9, 0xd3, - 0xd9, 0x19, 0xb6, 0x6e, 0x21, 0x13, 0x1a, 0xe3, 0x93, 0xf9, 0xf8, 0xcc, 0x32, 0x38, 0x3e, 0x9d, - 0x4f, 0x26, 0x27, 0xf8, 0x85, 0x75, 0x9b, 0x6f, 0xe6, 0x4f, 0x67, 0x2f, 0x9e, 0x9f, 0x3d, 0xb4, - 0x6a, 0xa8, 0x0b, 0xe6, 0xe3, 0x2f, 0xa7, 0xb3, 0x67, 0x63, 0x7c, 0x32, 0xb1, 0xea, 0xe8, 0x0d, - 0xd8, 0x13, 0x31, 0x6e, 0x09, 0x36, 0x4e, 0x9d, 0x57, 0x97, 0x47, 0xc6, 0xef, 0x97, 0x47, 0xc6, - 0x5f, 0x97, 0x47, 0xc6, 0x37, 0x3d, 0x9f, 0xb9, 0xa5, 0x38, 0x57, 0x8a, 0x5b, 0x34, 0xc5, 0xcb, - 0xfe, 0xe8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x2e, 0x66, 0xc1, 0xcb, 0x09, 0x00, 0x00, + // 969 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0xae, 0x9b, 0x5f, 0x9f, 0x6c, 0x76, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0x52, 0x24, 0x76, 0xdb, 0x6d, 0x8a, 0x4a, 0xda, 0x32, 0x49, + 0x2e, 0xca, 0x8d, 0x35, 0x49, 0x66, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, 0x33, + 0xf0, 0x02, 0x3c, 0x06, 0xe2, 0x12, 0xf5, 0x92, 0x2b, 0x2e, 0x11, 0xda, 0x27, 0x41, 0xf3, 0x63, + 0x3b, 0x5b, 0x39, 0x0b, 0x0b, 0x77, 0x33, 0x5f, 0xbe, 0x73, 0xe6, 0x3b, 0xdf, 0x4c, 0xce, 0x31, + 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x29, 0x5f, 0xd3, 0x2c, 0x1d, 0x2d, 0x03, 0x9f, 0x46, 0x7c, + 0x14, 0x52, 0x9e, 0xf8, 0xcb, 0x74, 0x18, 0x27, 0x8c, 0x33, 0xd4, 0xf3, 0xd9, 0xb0, 0xe4, 0x0c, + 0x15, 0xe7, 0xa0, 0xe7, 0x31, 0x8f, 0x49, 0xc2, 0x48, 0xac, 0x14, 0xf7, 0xa0, 0xef, 0x31, 0xe6, + 0x05, 0x74, 0x24, 0x77, 0x8b, 0xec, 0x7c, 0xc4, 0xfd, 0x90, 0xa6, 0x9c, 0x84, 0xb1, 0x22, 0x38, + 0x1f, 0x83, 0xf9, 0x15, 0x59, 0xd0, 0xe0, 0x39, 0xf1, 0x13, 0x84, 0xa0, 0x1e, 0x91, 0x90, 0xda, + 0xc6, 0xc0, 0x38, 0x36, 0xb1, 0x5c, 0xa3, 0x1e, 0x34, 0x5e, 0x92, 0x20, 0xa3, 0xf6, 0x6d, 0x09, + 0xaa, 0x8d, 0x73, 0x08, 0x8d, 0x31, 0xc9, 0xbc, 0x8d, 0x9f, 0x45, 0x8c, 0x91, 0xff, 0xfc, 0xb3, + 0x01, 0xad, 0x07, 0x2c, 0x8b, 0x38, 0x4d, 0xaa, 0x19, 0xe8, 0x1e, 0xb4, 0xe9, 0xf7, 0x34, 0x8c, + 0x03, 0x92, 0xc8, 0xcc, 0x9d, 0x0f, 0x8f, 0x86, 0x55, 0x75, 0x0d, 0xcf, 0x34, 0x0b, 0x17, 0x7c, + 0x34, 0x86, 0xfd, 0x65, 0x42, 0x09, 0xa7, 0x2b, 0xb7, 0x28, 0xc7, 
0xae, 0xc9, 0x24, 0x07, 0x43, + 0x55, 0xf0, 0x30, 0x2f, 0x78, 0x38, 0xcb, 0x19, 0xd8, 0xd2, 0x41, 0x05, 0xe2, 0xdc, 0x87, 0xf6, + 0xd7, 0x19, 0x89, 0xb8, 0x1f, 0x50, 0x74, 0x00, 0xed, 0xef, 0xf4, 0x5a, 0x2b, 0x2d, 0xf6, 0x57, + 0x3d, 0x28, 0x8a, 0xfc, 0xc3, 0x80, 0xd6, 0x34, 0x0b, 0x43, 0x92, 0x5c, 0xa0, 0xb7, 0x61, 0x27, + 0x25, 0x61, 0x1c, 0x50, 0x77, 0x29, 0xca, 0x96, 0x19, 0xea, 0xb8, 0xa3, 0x30, 0xe9, 0x04, 0x3a, + 0x04, 0xd0, 0x94, 0x34, 0x0b, 0x75, 0x26, 0x53, 0x21, 0xd3, 0x2c, 0x44, 0x5f, 0x6c, 0x9c, 0x5f, + 0x1b, 0xd4, 0xb6, 0x1b, 0x92, 0x2b, 0x3e, 0xad, 0xbf, 0xfa, 0xb3, 0x7f, 0x6b, 0x43, 0x65, 0xa5, + 0x2d, 0xf5, 0xff, 0x60, 0x4b, 0x1f, 0x5a, 0xf3, 0x88, 0x5f, 0xc4, 0x74, 0xb5, 0xe5, 0x7a, 0x7f, + 0x6d, 0x80, 0xf9, 0xd8, 0x4f, 0x39, 0xf3, 0x12, 0x12, 0xfe, 0x9b, 0xda, 0xdf, 0x07, 0xb4, 0x49, + 0x71, 0xcf, 0x03, 0x46, 0xb8, 0xd4, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x48, 0xe0, 0xff, 0xe4, 0xd4, + 0x3d, 0x68, 0x2e, 0xb2, 0xe5, 0xb7, 0x94, 0x6b, 0x9f, 0xde, 0xaa, 0xf6, 0xe9, 0x54, 0x72, 0xb4, + 0x4b, 0x3a, 0xa2, 0xda, 0xa3, 0xbd, 0x9b, 0x7b, 0x84, 0xee, 0x40, 0x33, 0x5d, 0xae, 0x69, 0x48, + 0xec, 0xc6, 0xc0, 0x38, 0xde, 0xc7, 0x7a, 0x87, 0xde, 0x81, 0xdd, 0x1f, 0x68, 0xc2, 0x5c, 0xbe, + 0x4e, 0x68, 0xba, 0x66, 0xc1, 0xca, 0x6e, 0x4a, 0xfd, 0x5d, 0x81, 0xce, 0x72, 0x50, 0x94, 0x28, + 0x69, 0xca, 0xb1, 0x96, 0x74, 0xcc, 0x14, 0x88, 0xf2, 0xeb, 0x18, 0xac, 0xf2, 0x67, 0xed, 0x56, + 0x5b, 0xe6, 0xd9, 0x2d, 0x48, 0xca, 0xab, 0x27, 0xd0, 0x8d, 0xa8, 0x47, 0xb8, 0xff, 0x92, 0xba, + 0x69, 0x4c, 0x22, 0xdb, 0x94, 0x9e, 0x0c, 0xae, 0xf3, 0x64, 0x1a, 0x93, 0x48, 0xfb, 0xb2, 0x93, + 0x07, 0x0b, 0x4c, 0x88, 0x2f, 0x92, 0xad, 0x68, 0xc0, 0x89, 0x0d, 0x83, 0xda, 0x31, 0xc2, 0xc5, + 0x11, 0x0f, 0x05, 0x78, 0x85, 0xa6, 0x0a, 0xe8, 0x0c, 0x6a, 0xa2, 0xc6, 0x1c, 0x55, 0x45, 0x3c, + 0x81, 0x6e, 0xcc, 0x52, 0xbf, 0x94, 0xb6, 0x73, 0x33, 0x69, 0x79, 0x70, 0x2e, 0xad, 0x48, 0xa6, + 0xa4, 0x75, 0x95, 0xb4, 0x1c, 0x2d, 0xa4, 0x15, 0x34, 0x25, 0x6d, 0x57, 0x49, 0xcb, 0x51, 0x29, + 0xcd, 0xf9, 0xcd, 0x80, 0xa6, 0x3a, 0x10, 0xbd, 0x0b, 0xd6, 0x32, 0x0b, 0xb3, 0x60, 0xb3, 0x1c, + 0xf5, 0x82, 0xf7, 0x4a, 0x5c, 0x15, 0x74, 0x17, 0xee, 0xbc, 0x4e, 0xbd, 0xf2, 0x92, 0x7b, 0xaf, + 0x05, 0xa8, 0x1b, 0xea, 0x43, 0x27, 0x8b, 0x63, 0x9a, 0xb8, 0x0b, 0x96, 0x45, 0x2b, 0xfd, 0x9c, + 0x41, 0x42, 0xa7, 0x02, 0xb9, 0xd2, 0x0a, 0x6b, 0x37, 0x6b, 0x85, 0xce, 0x7d, 0x80, 0xd2, 0x38, + 0xf1, 0x28, 0xd9, 0xf9, 0x79, 0x4a, 0x55, 0x05, 0xfb, 0x58, 0xef, 0x04, 0x1e, 0xd0, 0xc8, 0xe3, + 0x6b, 0x79, 0x7a, 0x17, 0xeb, 0x9d, 0xf3, 0x93, 0x01, 0xed, 0x3c, 0x29, 0xfa, 0x0c, 0x1a, 0x81, + 0x98, 0x04, 0xb6, 0x21, 0xaf, 0xa9, 0x5f, 0xad, 0xa1, 0x18, 0x16, 0xfa, 0x96, 0x54, 0x4c, 0x75, + 0x87, 0x44, 0x9f, 0x82, 0x79, 0x93, 0x06, 0x5d, 0x92, 0x9d, 0x1f, 0x6b, 0xd0, 0x9c, 0xc8, 0xa9, + 0xf7, 0xff, 0x74, 0x7d, 0x00, 0x0d, 0x4f, 0xcc, 0x29, 0x3d, 0x63, 0xde, 0xac, 0x0e, 0x96, 0xa3, + 0x0c, 0x2b, 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x35, 0xba, 0xb4, 0xe4, 0xc3, 0xea, 0x20, 0x3d, 0xdf, + 0x70, 0xce, 0x16, 0x81, 0xa9, 0x1a, 0x07, 0xba, 0xeb, 0x6e, 0x09, 0xd4, 0x33, 0x03, 0xe7, 0x6c, + 0x11, 0x98, 0xa9, 0x7e, 0x2b, 0x9b, 0xc9, 0xd6, 0x40, 0xdd, 0x94, 0x71, 0xce, 0x46, 0x9f, 0x83, + 0xb9, 0xce, 0xdb, 0xb0, 0x6c, 0x22, 0x5b, 0xed, 0x29, 0xba, 0x35, 0x2e, 0x23, 0x44, 0xe3, 0x2e, + 0x1c, 0x77, 0xc3, 0x54, 0x76, 0xaa, 0x1a, 0xee, 0x14, 0xd8, 0x24, 0x75, 0x7e, 0x31, 0x60, 0x47, + 0xdd, 0xc3, 0x23, 0x12, 0xfa, 0xc1, 0x45, 0xe5, 0x27, 0x02, 0x82, 0xfa, 0x9a, 0x06, 0xb1, 0xfe, + 0x42, 0x90, 0x6b, 0x74, 0x17, 0xea, 0x42, 0xa3, 0xb4, 0x70, 0x77, 0xdb, 0x7f, 0x5e, 0x65, 
0x9e, + 0x5d, 0xc4, 0x14, 0x4b, 0xb6, 0x68, 0xed, 0xea, 0x5b, 0xc7, 0xae, 0x5f, 0xd7, 0xda, 0x55, 0x5c, + 0xde, 0xda, 0x55, 0x84, 0x50, 0x91, 0x45, 0x3e, 0x97, 0x16, 0x9a, 0x58, 0xae, 0xdf, 0x5b, 0x00, + 0x94, 0x67, 0xa0, 0x0e, 0xb4, 0x1e, 0x3c, 0x9b, 0x3f, 0x9d, 0x9d, 0x61, 0xeb, 0x16, 0x32, 0xa1, + 0x31, 0x3e, 0x99, 0x8f, 0xcf, 0x2c, 0x43, 0xe0, 0xd3, 0xf9, 0x64, 0x72, 0x82, 0x5f, 0x58, 0xb7, + 0xc5, 0x66, 0xfe, 0x74, 0xf6, 0xe2, 0xf9, 0xd9, 0x43, 0xab, 0x86, 0xba, 0x60, 0x3e, 0xfe, 0x72, + 0x3a, 0x7b, 0x36, 0xc6, 0x27, 0x13, 0xab, 0x8e, 0xde, 0x80, 0x3d, 0x19, 0xe3, 0x96, 0x60, 0xe3, + 0xd4, 0x79, 0x75, 0x79, 0x64, 0xfc, 0x7e, 0x79, 0x64, 0xfc, 0x75, 0x79, 0x64, 0x7c, 0xd3, 0xf3, + 0x99, 0x5b, 0x0a, 0x76, 0x95, 0xe0, 0x45, 0x53, 0xbe, 0xf6, 0x8f, 0xfe, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x6d, 0x53, 0xc5, 0x1e, 0xdf, 0x09, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1755,6 +1764,13 @@ func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } if len(m.Metric) > 0 { for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { { @@ -2128,6 +2144,10 @@ func (m *MetricFamily) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4074,6 +4094,38 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 13ac8dcb4d..06dcc34afb 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -153,4 +153,5 @@ message MetricFamily { string help = 2; MetricType type = 3; repeated Metric metric = 4 [(gogoproto.nullable) = false]; + string unit = 5; } diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index 125f868e97..93883daa13 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -164,7 +164,7 @@ func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { type MetricMetadata struct { // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. 
Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index aa322515c3..61fc1e0143 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -31,7 +31,7 @@ message MetricMetadata { } // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. MetricType type = 1; string metric_family_name = 2; string help = 4; diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 4fa2a513be..2ea37dae60 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -2052,7 +2052,12 @@ func (ev *evaluator) matrixIterSlice( var drop int for drop = 0; histograms[drop].T < mint; drop++ { } + // Rotate the buffer around the drop index so that points before mint can be + // reused to store new histograms. + tail := make([]HPoint, drop) + copy(tail, histograms[:drop]) copy(histograms, histograms[drop:]) + copy(histograms[len(histograms)-drop:], tail) histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. @@ -2078,21 +2083,27 @@ loop: case chunkenc.ValNone: break loop case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := buf.AtFloatHistogram() - if value.IsStaleNaN(h.Sum) { - continue loop - } + t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. if t >= mintHistograms { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - point := HPoint{T: t, H: h} if histograms == nil { histograms = getHPointSlice(16) } - histograms = append(histograms, point) - ev.currentSamples += point.size() + n := len(histograms) + if n < cap(histograms) { + histograms = histograms[:n+1] + } else { + histograms = append(histograms, HPoint{}) + } + histograms[n].T, histograms[n].H = buf.AtFloatHistogram(histograms[n].H) + if value.IsStaleNaN(histograms[n].H.Sum) { + histograms = histograms[:n] + continue loop + } + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples += histograms[n].size() } case chunkenc.ValFloat: t, f := buf.At() @@ -2115,17 +2126,22 @@ loop: // The sought sample might also be in the range. 
switch soughtValueType { case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := it.AtFloatHistogram() - if t == maxt && !value.IsStaleNaN(h.Sum) { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if histograms == nil { - histograms = getHPointSlice(16) + t := it.AtT() + if t == maxt { + _, h := it.AtFloatHistogram() + if !value.IsStaleNaN(h.Sum) { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if histograms == nil { + histograms = getHPointSlice(16) + } + // The last sample comes directly from the iterator, so we need to copy it to + // avoid having the same reference twice in the buffer. + point := HPoint{T: t, H: h.Copy()} + histograms = append(histograms, point) + ev.currentSamples += point.size() } - point := HPoint{T: t, H: h} - histograms = append(histograms, point) - ev.currentSamples += point.size() } case chunkenc.ValFloat: t, f := it.At() diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 06f6f8c71c..407a11b50a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -609,6 +609,25 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod }), nil } +// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { + values := make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: f.F}) + } + median := quantile(0.5, values) + values = make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: math.Abs(f.F - median)}) + } + return quantile(0.5, values) + }), nil +} + // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { @@ -1538,6 +1557,7 @@ var FunctionCalls = map[string]FunctionCall{ "log10": funcLog10, "log2": funcLog2, "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, "max_over_time": funcMaxOverTime, "min_over_time": funcMinOverTime, "minute": funcMinute, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index ee2e90c55e..46d50d5476 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -254,6 +254,12 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "mad_over_time": { + Name: "mad_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Experimental: true, + }, "max_over_time": { Name: "max_over_time", ArgTypes: []ValueType{ValueTypeMatrix}, diff --git a/vendor/github.com/prometheus/prometheus/promql/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/testdata/functions.test index b5263a96fc..b4547886a7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/testdata/functions.test +++ 
b/vendor/github.com/prometheus/prometheus/promql/testdata/functions.test @@ -739,6 +739,14 @@ eval instant at 1m stdvar_over_time(metric[1m]) eval instant at 1m stddev_over_time(metric[1m]) {} 0 +# Tests for mad_over_time. +clear +load 10s + metric 4 6 2 1 999 1 2 + +eval instant at 70s mad_over_time(metric[70s]) + {} 1 + # Tests for quantile_over_time clear diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 3b70e48a13..3ad315a50a 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -78,9 +78,15 @@ type Options struct { EnableMetadataStorage bool // Option to increase the interval used by scrape manager to throttle target groups updates. DiscoveryReloadInterval model.Duration + // Option to enable the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool // Optional HTTP client options to use when scraping. HTTPClientOptions []config_util.HTTPClientOption + + // private option for testability. + skipOffsetting bool } // Manager maintains a set of scrape pools and manages start/stop cycles @@ -282,24 +288,10 @@ func (m *Manager) TargetsActive() map[string][]*Target { m.mtxScrape.Lock() defer m.mtxScrape.Unlock() - var ( - wg sync.WaitGroup - mtx sync.Mutex - ) - targets := make(map[string][]*Target, len(m.scrapePools)) - wg.Add(len(m.scrapePools)) for tset, sp := range m.scrapePools { - // Running in parallel limits the blocking time of scrapePool to scrape - // interval when there's an update from SD. - go func(tset string, sp *scrapePool) { - mtx.Lock() - targets[tset] = sp.ActiveTargets() - mtx.Unlock() - wg.Done() - }(tset, sp) + targets[tset] = sp.ActiveTargets() } - wg.Wait() return targets } diff --git a/vendor/github.com/prometheus/prometheus/scrape/metrics.go b/vendor/github.com/prometheus/prometheus/scrape/metrics.go index d74143185b..7082bc743b 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/metrics.go +++ b/vendor/github.com/prometheus/prometheus/scrape/metrics.go @@ -286,8 +286,8 @@ func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) { for tset, targets := range mc.TargetsGatherer.TargetsActive() { var size, length int for _, t := range targets { - size += t.MetadataSize() - length += t.MetadataLength() + size += t.SizeMetadata() + length += t.LengthMetadata() } ch <- prometheus.MustNewConstMetric( diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 9a0ba1d009..2518912a7a 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -106,9 +106,10 @@ type scrapeLoopOptions struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool - mrc []*relabel.Config - cache *scrapeCache - enableCompression bool + + mrc []*relabel.Config + cache *scrapeCache + enableCompression bool } const maxAheadTime = 10 * time.Minute @@ -168,11 +169,13 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.interval, opts.timeout, opts.scrapeClassicHistograms, + options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, options.EnableMetadataStorage, opts.target, options.PassMetadataInContext, metrics, + options.skipOffsetting, ) } 
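[Editor's note] The two Options fields added above are threaded through newScrapePool into every scrape loop here. A hedged sketch of enabling created-timestamp zero ingestion from embedding code, assuming the NewManager signature at this vendored revision:

    // logger (log.Logger) and app (storage.Appendable) are assumed to be in scope.
    opts := &scrape.Options{
        EnableCreatedTimestampZeroIngestion: true, // synthetic zero sample at each series' CT
    }
    mgr, err := scrape.NewManager(opts, logger, app, prometheus.DefaultRegisterer)
    if err != nil {
        // handle err: the manager could not register its metrics
    }
    _ = mgr

Note that skipOffsetting stays package-private, so only tests inside the scrape package can bypass the jittered loop start.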
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -672,7 +675,7 @@ func acceptHeader(sps []config.ScrapeProtocol) string { weight-- } // Default match anything. - vals = append(vals, fmt.Sprintf("*/*;q=%d", weight)) + vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight)) return strings.Join(vals, ",") } @@ -787,6 +790,7 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool + enableCTZeroIngestion bool appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -804,6 +808,8 @@ type scrapeLoop struct { appendMetadataToWAL bool metrics *scrapeMetrics + + skipOffsetting bool // For testability. } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -955,12 +961,12 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) { } } -func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { +func (c *scrapeCache) setType(metric []byte, t model.MetricType) { c.metaMtx.Lock() e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Type != t { @@ -977,7 +983,7 @@ func (c *scrapeCache) setHelp(metric, help []byte) { e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Help != string(help) { @@ -994,7 +1000,7 @@ func (c *scrapeCache) setUnit(metric, unit []byte) { e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Unit != string(unit) { @@ -1076,11 +1082,13 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, scrapeClassicHistograms bool, + enableCTZeroIngestion bool, reportExtraMetrics bool, appendMetadataToWAL bool, target *Target, passMetadataInContext bool, metrics *scrapeMetrics, + skipOffsetting bool, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1124,9 +1132,11 @@ func newScrapeLoop(ctx context.Context, interval: interval, timeout: timeout, scrapeClassicHistograms: scrapeClassicHistograms, + enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, appendMetadataToWAL: appendMetadataToWAL, metrics: metrics, + skipOffsetting: skipOffsetting, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1134,12 +1144,14 @@ func newScrapeLoop(ctx context.Context, } func (sl *scrapeLoop) run(errc chan<- error) { - select { - case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): - // Continue after a scraping offset. - case <-sl.ctx.Done(): - close(sl.stopped) - return + if !sl.skipOffsetting { + select { + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): + // Continue after a scraping offset. + case <-sl.ctx.Done(): + close(sl.stopped) + return + } } var last time.Time @@ -1557,6 +1569,15 @@ loop: updateMetadata(lset, true) } + if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. 
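+ // Editor's note (annotation, not part of the upstream patch): per the
+ // CreatedTimestampAppender contract added in storage/interface.go below,
+ // AppendCTZeroSample must run before the regular Append for the same
+ // sample, which is why this block precedes the Append* calls that follow.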
+ // CT is an experimental feature. For now, we don't need to fail the + // scrape on errors updating the created timestamp, log debug. + level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + } + } + if isHistogram { if h != nil { ref, err = app.AppendHistogram(ref, lset, t, h, nil) diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 62f662693b..0605f53490 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -30,7 +30,6 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" ) @@ -87,12 +86,12 @@ type MetricMetadataStore interface { // MetricMetadata is a piece of metadata for a metric. type MetricMetadata struct { Metric string - Type textparse.MetricType + Type model.MetricType Help string Unit string } -func (t *Target) MetadataList() []MetricMetadata { +func (t *Target) ListMetadata() []MetricMetadata { t.mtx.RLock() defer t.mtx.RUnlock() @@ -102,7 +101,7 @@ func (t *Target) MetadataList() []MetricMetadata { return t.metadata.ListMetadata() } -func (t *Target) MetadataSize() int { +func (t *Target) SizeMetadata() int { t.mtx.RLock() defer t.mtx.RUnlock() @@ -113,7 +112,7 @@ func (t *Target) MetadataSize() int { return t.metadata.SizeMetadata() } -func (t *Target) MetadataLength() int { +func (t *Target) LengthMetadata() int { t.mtx.RLock() defer t.mtx.RUnlock() @@ -124,8 +123,8 @@ func (t *Target) MetadataLength() int { return t.metadata.LengthMetadata() } -// Metadata returns type and help metadata for the given metric. -func (t *Target) Metadata(metric string) (MetricMetadata, bool) { +// GetMetadata returns type and help metadata for the given metric. +func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) { t.mtx.RLock() defer t.mtx.RUnlock() diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index d2d89e0425..d19f841d43 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -74,7 +74,7 @@ func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) // Buffer returns an iterator over the buffered data. Invalidates previously // returned iterators. -func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator { +func (b *BufferedSeriesIterator) Buffer() *SampleRingIterator { return b.buf.iterator() } @@ -202,7 +202,7 @@ func (s hSample) H() *histogram.Histogram { } func (s hSample) FH() *histogram.FloatHistogram { - return s.h.ToFloat() + return s.h.ToFloat(nil) } func (s hSample) Type() chunkenc.ValueType { @@ -252,7 +252,7 @@ type sampleRing struct { f int // Position of first element in ring buffer. l int // Number of elements in buffer. - it sampleRingIterator + it SampleRingIterator } type bufType int @@ -304,13 +304,15 @@ func (r *sampleRing) reset() { } // Returns the current iterator. Invalidates previously returned iterators. 
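[Editor's note] Buffer() now returns the concrete *SampleRingIterator, which no longer satisfies chunkenc.Iterator (Seek and Err are gone), so callers that stored the result under the interface type need updating. A hedged sketch of walking the lookback buffer; bufIter is an assumed *storage.BufferedSeriesIterator:

    it := bufIter.Buffer() // *storage.SampleRingIterator after this change
    for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
        switch vt {
        case chunkenc.ValFloat:
            t, f := it.At()
            _, _ = t, f
        case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
            // nil allocates; pass a reusable *histogram.FloatHistogram to avoid that
            t, fh := it.AtFloatHistogram(nil)
            _, _ = t, fh
        }
    }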
-func (r *sampleRing) iterator() chunkenc.Iterator { +func (r *sampleRing) iterator() *SampleRingIterator { r.it.r = r r.it.i = -1 return &r.it } -type sampleRingIterator struct { +// SampleRingIterator is returned by BufferedSeriesIterator.Buffer() and can be +// used to iterate samples buffered in the lookback window. +type SampleRingIterator struct { r *sampleRing i int t int64 @@ -319,7 +321,7 @@ type sampleRingIterator struct { fh *histogram.FloatHistogram } -func (it *sampleRingIterator) Next() chunkenc.ValueType { +func (it *SampleRingIterator) Next() chunkenc.ValueType { it.i++ if it.i >= it.r.l { return chunkenc.ValNone @@ -358,30 +360,28 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { } } -func (it *sampleRingIterator) Seek(int64) chunkenc.ValueType { - return chunkenc.ValNone -} - -func (it *sampleRingIterator) Err() error { - return nil -} - -func (it *sampleRingIterator) At() (int64, float64) { +// At returns the current float element of the iterator. +func (it *SampleRingIterator) At() (int64, float64) { return it.t, it.f } -func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { +// AtHistogram returns the current histogram element of the iterator. +func (it *SampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { return it.t, it.h } -func (it *sampleRingIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { +// AtFloatHistogram returns the current histogram element of the iterator. If the +// current sample is an integer histogram, it will be converted to a float histogram. +// An optional histogram.FloatHistogram can be provided to avoid allocating a new +// object for the conversion. +func (it *SampleRingIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { if it.fh == nil { - return it.t, it.h.ToFloat() + return it.t, it.h.ToFloat(fh) } return it.t, it.fh } -func (it *sampleRingIterator) AtT() int64 { +func (it *SampleRingIterator) AtT() int64 { return it.t } diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 33257046f2..a9a3f904b6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -202,6 +202,20 @@ func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metada return ref, nil } +func (f *fanoutAppender) AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) { + ref, err := f.primary.AppendCTZeroSample(ref, l, t, ct) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendCTZeroSample(ref, l, t, ct); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) Commit() (err error) { err = f.primary.Commit() diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 2b1b6a63eb..675e44c0ee 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -43,6 +43,13 @@ var ( ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") + + // ErrOutOfOrderCT 
indicates failed append of CT to the storage + // due to CT being older the then newer sample. + // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected + // behaviour, and we currently don't have a way to determine this. As a result + // it's recommended to ignore this error for now. + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -237,6 +244,7 @@ type Appender interface { ExemplarAppender HistogramAppender MetadataUpdater + CreatedTimestampAppender } // GetRef is an extra interface on Appenders used by downstream projects @@ -294,6 +302,24 @@ type MetadataUpdater interface { UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) } +// CreatedTimestampAppender provides an interface for appending CT to storage. +type CreatedTimestampAppender interface { + // AppendCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendCTZeroSample has to be called before the corresponding sample Append. + // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) +} + // SeriesSet contains a set of series. type SeriesSet interface { Next() bool diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 67035cd8ec..ffab821a5f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -33,7 +33,6 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -784,7 +783,7 @@ func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label } // metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. 
-func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType { +func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType { mt := strings.ToUpper(string(t)) v, ok := prompb.MetricMetadata_MetricType_value[mt] if !ok { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index 21de565ed9..abfea3c7b0 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -136,7 +136,7 @@ func (mw *MetadataWatcher) collect() { metadata := []scrape.MetricMetadata{} for _, tset := range mw.manager.TargetsActive() { for _, target := range tset { - for _, entry := range target.MetadataList() { + for _, entry := range target.ListMetadata() { if _, ok := metadataSet[entry]; !ok { metadata = append(metadata, entry) metadataSet[entry] = struct{}{} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index a25c7d90c0..e37ec8c705 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" @@ -51,6 +52,10 @@ const ( // Allow 30% too many shards before scaling down. 
shardToleranceFraction = 0.3 + + reasonTooOld = "too_old" + reasonDroppedSeries = "dropped_series" + reasonUnintentionalDroppedSeries = "unintentionally_dropped_series" ) type queueManagerMetrics struct { @@ -68,9 +73,9 @@ type queueManagerMetrics struct { retriedExemplarsTotal prometheus.Counter retriedHistogramsTotal prometheus.Counter retriedMetadataTotal prometheus.Counter - droppedSamplesTotal prometheus.Counter - droppedExemplarsTotal prometheus.Counter - droppedHistogramsTotal prometheus.Counter + droppedSamplesTotal *prometheus.CounterVec + droppedExemplarsTotal *prometheus.CounterVec + droppedHistogramsTotal *prometheus.CounterVec enqueueRetriesTotal prometheus.Counter sentBatchDuration prometheus.Histogram highestSentTimestamp *maxTimestamp @@ -180,27 +185,27 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) - m.droppedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + m.droppedSamplesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_dropped_total", - Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, - }) - m.droppedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + }, []string{"reason"}) + m.droppedExemplarsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_dropped_total", - Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, - }) - m.droppedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + }, []string{"reason"}) + m.droppedHistogramsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_dropped_total", - Help: "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + Help: "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, - }) + }, []string{"reason"}) m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -391,7 +396,8 @@ type WriteClient interface { // indicated by the provided WriteClient. Implements writeTo interface // used by WAL Watcher. 
type QueueManager struct { - lastSendTimestamp atomic.Int64 + lastSendTimestamp atomic.Int64 + buildRequestLimitTimestamp atomic.Int64 logger log.Logger flushDeadline time.Duration @@ -529,7 +535,7 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples. - req, _, err := buildWriteRequest(nil, metadata, pBuf, nil) + req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil) if err != nil { return err } @@ -575,18 +581,65 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p return nil } +func isSampleOld(baseTime time.Time, sampleAgeLimit time.Duration, ts int64) bool { + if sampleAgeLimit == 0 { + // If sampleAgeLimit is unset, then we never skip samples due to their age. + return false + } + limitTs := baseTime.Add(-sampleAgeLimit) + sampleTs := timestamp.Time(ts) + return sampleTs.Before(limitTs) +} + +func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts prompb.TimeSeries) bool { + return func(ts prompb.TimeSeries) bool { + if sampleAgeLimit == 0 { + // If sampleAgeLimit is unset, then we never skip samples due to their age. + return false + } + switch { + // Only the first element should be set in the series, therefore we only check the first element. + case len(ts.Samples) > 0: + if isSampleOld(baseTime, sampleAgeLimit, ts.Samples[0].Timestamp) { + metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc() + return true + } + case len(ts.Histograms) > 0: + if isSampleOld(baseTime, sampleAgeLimit, ts.Histograms[0].Timestamp) { + metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc() + return true + } + case len(ts.Exemplars) > 0: + if isSampleOld(baseTime, sampleAgeLimit, ts.Exemplars[0].Timestamp) { + metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc() + return true + } + default: + return false + } + return false + } +} + // Append queues a sample to be sent to the remote storage. Blocks until all samples are // enqueued on their shards or a shutdown signal is received. func (t *QueueManager) Append(samples []record.RefSample) bool { + currentTime := time.Now() outer: for _, s := range samples { + if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), s.T) { + t.metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc() + continue + } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[s.Ref] if !ok { - t.metrics.droppedSamplesTotal.Inc() t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() + } else { + t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue @@ -629,17 +682,23 @@ func (t *QueueManager) AppendExemplars(exemplars []record.RefExemplar) bool { if !t.sendExemplars { return true } - + currentTime := time.Now() outer: for _, e := range exemplars { + if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), e.T) { + t.metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc() + continue + } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[e.Ref] if !ok { - t.metrics.droppedExemplarsTotal.Inc() // Track dropped exemplars in the same EWMA for sharding calc. 
t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() + } else { + t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue @@ -678,16 +737,22 @@ func (t *QueueManager) AppendHistograms(histograms []record.RefHistogramSample) if !t.sendNativeHistograms { return true } - + currentTime := time.Now() outer: for _, h := range histograms { + if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), h.T) { + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc() + continue + } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[h.Ref] if !ok { - t.metrics.droppedHistogramsTotal.Inc() t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() + } else { + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue @@ -725,16 +790,22 @@ func (t *QueueManager) AppendFloatHistograms(floatHistograms []record.RefFloatHi if !t.sendNativeHistograms { return true } - + currentTime := time.Now() outer: for _, h := range floatHistograms { + if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), h.T) { + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc() + continue + } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[h.Ref] if !ok { - t.metrics.droppedHistogramsTotal.Inc() t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() + } else { + t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue @@ -1490,7 +1561,8 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s // sendSamples to the remote storage with backoff for recoverable errors. func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. - req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) + req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, *buf, nil) + s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. @@ -1504,6 +1576,25 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3. attemptStore := func(try int) error { + currentTime := time.Now() + lowest := s.qm.buildRequestLimitTimestamp.Load() + if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) { + // This will filter out old samples during retries. 
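The drop counters in these hunks change from plain counters to counter vectors keyed by a reason label (too-old data, intentionally dropped series, unintentionally dropped series), so one metric family now records why data was discarded. A sketch of that pattern with client_golang follows; the metric name and label values here are illustrative, not the exact ones the queue manager registers:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/testutil"
    )

    const (
    	reasonTooOld        = "too_old"        // illustrative value
    	reasonDroppedSeries = "dropped_series" // illustrative value
    )

    func main() {
    	// One CounterVec with a "reason" label replaces several plain counters.
    	dropped := prometheus.NewCounterVec(prometheus.CounterOpts{
    		Name: "example_dropped_samples_total",
    		Help: "Samples dropped before sending, partitioned by reason.",
    	}, []string{"reason"})

    	dropped.WithLabelValues(reasonTooOld).Inc()
    	dropped.WithLabelValues(reasonTooOld).Inc()
    	dropped.WithLabelValues(reasonDroppedSeries).Inc()

    	fmt.Println(testutil.ToFloat64(dropped.WithLabelValues(reasonTooOld))) // 2
    }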
+ req, _, lowest, err := buildWriteRequest( + s.qm.logger, + samples, + nil, + pBuf, + *buf, + isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)), + ) + s.qm.buildRequestLimitTimestamp.Store(lowest) + if err != nil { + return err + } + *buf = req + } + ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch") defer span.End() @@ -1608,9 +1699,27 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l } } -func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte) ([]byte, int64, error) { +func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeries) bool) (int64, int64, []prompb.TimeSeries, int, int, int) { var highest int64 - for _, ts := range samples { + var lowest int64 + var droppedSamples, droppedExemplars, droppedHistograms int + + keepIdx := 0 + lowest = math.MaxInt64 + for i, ts := range timeSeries { + if filter != nil && filter(ts) { + if len(ts.Samples) > 0 { + droppedSamples++ + } + if len(ts.Exemplars) > 0 { + droppedExemplars++ + } + if len(ts.Histograms) > 0 { + droppedHistograms++ + } + continue + } + // At the moment we only ever append a TimeSeries with a single sample or exemplar in it. if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest { highest = ts.Samples[0].Timestamp @@ -1621,10 +1730,37 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest { highest = ts.Histograms[0].Timestamp } + + // Get lowest timestamp + if len(ts.Samples) > 0 && ts.Samples[0].Timestamp < lowest { + lowest = ts.Samples[0].Timestamp + } + if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp < lowest { + lowest = ts.Exemplars[0].Timestamp + } + if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest { + lowest = ts.Histograms[0].Timestamp + } + + // Move the current element to the write position and increment the write pointer + timeSeries[keepIdx] = timeSeries[i] + keepIdx++ + } + + timeSeries = timeSeries[:keepIdx] + return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms +} + +func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) { + highest, lowest, timeSeries, + droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) + + if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { + level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &prompb.WriteRequest{ - Timeseries: samples, + Timeseries: timeSeries, Metadata: metadata, } @@ -1635,7 +1771,7 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta } err := pBuf.Marshal(req) if err != nil { - return nil, highest, err + return nil, highest, lowest, err } // snappy uses len() to see if it needs to allocate a new slice. 
Make the @@ -1644,5 +1780,5 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta buf = buf[0:cap(buf)] } compressed := snappy.Encode(buf, pBuf.Bytes()) - return compressed, highest, nil + return compressed, highest, lowest, nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 237f8caa91..66455cb4dd 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -303,6 +303,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. + return 0, nil +} + // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 9891c6aae7..971f7b457e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -68,7 +68,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err = h.write(r.Context(), req) switch { case err == nil: - case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): // Indicate that an out-of-order sample is a bad request, to prevent retries. http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index a586536b15..e2562de03c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -17,6 +17,7 @@ package tsdb import ( "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -26,7 +27,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -479,14 +479,19 @@ func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, ma slices.Sort(st) } } - - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { st, err := r.ir.LabelValues(ctx, name) - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } return labelValuesWithMatchers(ctx, r.ir, name, matchers...)
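The block.go hunk above is the first of many in this patch that swap github.com/pkg/errors wrapping for the standard library's %w verb. The two styles are equivalent for callers that inspect the chain, which is what keeps these rewrites behavior-preserving; the explicit nil checks are added because errors.Wrapf returned nil for a nil error, whereas fmt.Errorf always returns a non-nil error. A small sketch:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errCorrupt = errors.New("corrupt index")

    func main() {
    	ulid := "01ARZ3NDEKTSV4RRFFQ69G5FAV" // an example block ULID

    	// Before: errors.Wrapf(errCorrupt, "block: %s", ulid) from github.com/pkg/errors.
    	// After: fmt.Errorf with %w, which wraps without the extra dependency.
    	wrapped := fmt.Errorf("block: %s: %w", ulid, errCorrupt)

    	fmt.Println(wrapped)                              // block: 01ARZ3...: corrupt index
    	fmt.Println(errors.Is(wrapped, errCorrupt))       // true: the sentinel still matches
    	fmt.Println(errors.Unwrap(wrapped) == errCorrupt) // true
    }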
@@ -503,7 +508,7 @@ func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { p, err := r.ir.Postings(ctx, name, values...) if err != nil { - return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return p, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return p, nil } @@ -514,7 +519,7 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { if err := r.ir.Series(ref, builder, chks); err != nil { - return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return nil } @@ -566,7 +571,7 @@ func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Mat p, err := PostingsForMatchers(ctx, pb.indexr, ms...) if err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } ir := pb.indexr @@ -654,12 +659,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) if err := os.MkdirAll(blockDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot block dir") + return fmt.Errorf("create snapshot block dir: %w", err) } chunksDir := chunkDir(blockDir) if err := os.MkdirAll(chunksDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot chunk dir") + return fmt.Errorf("create snapshot chunk dir: %w", err) } // Hardlink meta, index and tombstones @@ -669,7 +674,7 @@ func (pb *Block) Snapshot(dir string) error { tombstones.TombstonesFilename, } { if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil { - return errors.Wrapf(err, "create snapshot %s", fname) + return fmt.Errorf("create snapshot %s: %w", fname, err) } } @@ -677,13 +682,13 @@ func (pb *Block) Snapshot(dir string) error { curChunkDir := chunkDir(pb.dir) files, err := os.ReadDir(curChunkDir) if err != nil { - return errors.Wrap(err, "ReadDir the current chunk dir") + return fmt.Errorf("ReadDir the current chunk dir: %w", err) } for _, f := range files { err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name())) if err != nil { - return errors.Wrap(err, "hardlink a chunk") + return fmt.Errorf("hardlink a chunk: %w", err) } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 0d017e095f..73bc5f1e35 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -15,13 +15,14 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" @@ -65,7 +66,7 @@ func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWrite func (w *BlockWriter) initHead() error { chunkDir, err := os.MkdirTemp(os.TempDir(), "head") if err != nil { - return errors.Wrap(err, "create temp dir") + return fmt.Errorf("create temp dir: %w", err) } w.chunkDir = chunkDir opts := DefaultHeadOptions() @@ -74,7 +75,7 @@ func (w *BlockWriter) initHead() error { 
opts.EnableNativeHistograms.Store(true) h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { - return errors.Wrap(err, "tsdb.NewHead") + return fmt.Errorf("tsdb.NewHead: %w", err) } w.head = h @@ -102,11 +103,11 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { []int64{w.blockSize}, chunkenc.NewPool(), nil) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "create leveled compactor") + return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err) } id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "compactor write") + return ulid.ULID{}, fmt.Errorf("compactor write: %w", err) } return id, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index f22285a0ca..2c6db3637a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -671,7 +671,7 @@ func (s *Reader) Size() int64 { return s.size } -// Chunk returns a chunk from a given reference. +// ChunkOrIterable returns a chunk from a given reference. func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index b495b61828..12c3e7b900 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -111,6 +111,10 @@ func (e *CorruptionErr) Error() string { return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error() } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // chunkPos keeps track of the position in the head chunk files. // chunkPos is not thread-safe, a lock must be used to protect it. type chunkPos struct { @@ -400,7 +404,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro buf := make([]byte, MagicChunksSize) size, err := f.Read(buf) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err) } if err := f.Close(); err != nil { @@ -892,7 +896,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding. 
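The Unwrap method added to CorruptionErr above is what lets the errors.As call sites elsewhere in this patch find a corruption error through layers of fmt.Errorf wrapping, where the old direct type assertion would miss it. A cut-down sketch (the type here is a stand-in, not the real chunks.CorruptionErr):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // CorruptionErr is a reduced stand-in for the TSDB's chunk corruption error;
    // the Unwrap method is what this patch adds to the real type.
    type CorruptionErr struct {
    	FileIndex int
    	Err       error
    }

    func (e *CorruptionErr) Error() string {
    	return fmt.Sprintf("corruption in segment %d: %v", e.FileIndex, e.Err)
    }

    func (e *CorruptionErr) Unwrap() error { return e.Err }

    func main() {
    	inner := &CorruptionErr{FileIndex: 7, Err: errors.New("bad magic number")}
    	err := fmt.Errorf("iterate chunks: %w", inner)

    	// The old `err.(*CorruptionErr)` type assertion fails on a wrapped error;
    	// errors.As walks the chain, which is why the patch switches to it.
    	var cerr *CorruptionErr
    	if errors.As(err, &cerr) {
    		fmt.Println("segment:", cerr.FileIndex) // segment: 7
    	}
    }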
chkEnc = cdm.RemoveMasks(chkEnc) if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil { - if cerr, ok := err.(*CorruptionErr); ok { + var cerr *CorruptionErr + if errors.As(err, &cerr) { cerr.Dir = cdm.dir.Name() cerr.FileIndex = segID return cerr diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 32c88d2cc0..511c518d98 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -16,6 +16,7 @@ package tsdb import ( "context" "crypto/rand" + "errors" "fmt" "io" "os" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -75,13 +75,15 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. type LeveledCompactor struct { - metrics *CompactorMetrics - logger log.Logger - ranges []int64 - chunkPool chunkenc.Pool - ctx context.Context - maxBlockChunkSegmentSize int64 - mergeFunc storage.VerticalChunkSeriesMergeFunc + metrics *CompactorMetrics + logger log.Logger + ranges []int64 + chunkPool chunkenc.Pool + ctx context.Context + maxBlockChunkSegmentSize int64 + mergeFunc storage.VerticalChunkSeriesMergeFunc + postingsEncoder index.PostingsEncoder + enableOverlappingCompaction bool } type CompactorMetrics struct { @@ -144,12 +146,35 @@ func newCompactorMetrics(r prometheus.Registerer) *CompactorMetrics { return m } -// NewLeveledCompactor returns a LeveledCompactor. -func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { - return NewLeveledCompactorWithChunkSize(ctx, r, l, ranges, pool, chunks.DefaultChunkSegmentSize, mergeFunc) +type LeveledCompactorOptions struct { + // PE specifies the postings encoder. It is called when the compactor is writing out the postings for a label name/value pair during compaction. + // If it is nil then the default encoder is used. At the moment that is the "raw" encoder. See index.EncodePostingsRaw for more. + PE index.PostingsEncoder + // MaxBlockChunkSegmentSize is the max block chunk segment size. If it is 0 then the default chunks.DefaultChunkSegmentSize is used. + MaxBlockChunkSegmentSize int64 + // MergeFunc is used for merging series together in vertical compaction. By default storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) is used. + MergeFunc storage.VerticalChunkSeriesMergeFunc + // EnableOverlappingCompaction enables compaction of overlapping blocks. In Prometheus it is always enabled. + // The option is useful for downstream projects like Mimir, Cortex, and Thanos, which have a separate component that handles compaction of overlapping blocks.
+ EnableOverlappingCompaction bool } func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { + return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ + MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, + MergeFunc: mergeFunc, + EnableOverlappingCompaction: true, + }) +} + +func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { + return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ + MergeFunc: mergeFunc, + EnableOverlappingCompaction: true, + }) +} + +func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, fmt.Errorf("at least one range must be provided") } @@ -159,17 +184,28 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register if l == nil { l = log.NewNopLogger() } + mergeFunc := opts.MergeFunc if mergeFunc == nil { mergeFunc = storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) } + maxBlockChunkSegmentSize := opts.MaxBlockChunkSegmentSize + if maxBlockChunkSegmentSize == 0 { + maxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize + } + pe := opts.PE + if pe == nil { + pe = index.EncodePostingsRaw + } return &LeveledCompactor{ - ranges: ranges, - chunkPool: pool, - logger: l, - metrics: newCompactorMetrics(r), - ctx: ctx, - maxBlockChunkSegmentSize: maxBlockChunkSegmentSize, - mergeFunc: mergeFunc, + ranges: ranges, + chunkPool: pool, + logger: l, + metrics: newCompactorMetrics(r), + ctx: ctx, + maxBlockChunkSegmentSize: maxBlockChunkSegmentSize, + mergeFunc: mergeFunc, + postingsEncoder: pe, + enableOverlappingCompaction: opts.EnableOverlappingCompaction, }, nil } @@ -288,6 +324,9 @@ func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta { // selectOverlappingDirs returns all dirs with overlapping time ranges. // It expects sorted input by mint and returns the overlapping dirs in the same order as received. func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string { + if !c.enableOverlappingCompaction { + return nil + } if len(ds) < 2 { return nil } @@ -485,7 +524,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, if !errors.Is(err, context.Canceled) { for _, b := range bs { if err := b.setCompactionFailed(); err != nil { - errs.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir())) + errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err)) } } } @@ -586,7 +625,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl chunkw, err = chunks.NewWriterWithSegSize(chunkDir(tmp), c.maxBlockChunkSegmentSize) if err != nil { - return errors.Wrap(err, "open chunk writer") + return fmt.Errorf("open chunk writer: %w", err) } closers = append(closers, chunkw) // Record written chunk sizes on level 1 compactions.
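For downstream callers, the new options constructor reads roughly as below; this is a hedged sketch assuming the post-upgrade tsdb API, with arbitrary block ranges. Zero-valued option fields fall back to the defaults restored inside the constructor:

    package main

    import (
    	"context"
    	"fmt"

    	gokitlog "github.com/go-kit/log"
    	"github.com/prometheus/prometheus/tsdb"
    	"github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    func main() {
    	// Two-hour base block range, growing exponentially, in milliseconds.
    	ranges := tsdb.ExponentialBlockRanges(2*60*60*1000, 3, 5)

    	// A Cortex/Mimir-style ingester can now opt out of vertical compaction
    	// of overlapping blocks at construction time.
    	compactor, err := tsdb.NewLeveledCompactorWithOptions(
    		context.Background(),
    		nil, // prometheus.Registerer; nil skips metric registration
    		gokitlog.NewNopLogger(),
    		ranges,
    		chunkenc.NewPool(),
    		tsdb.LeveledCompactorOptions{EnableOverlappingCompaction: false},
    	)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("compactor ready: %T\n", compactor)
    }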
@@ -599,14 +638,14 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } } - indexw, err := index.NewWriter(c.ctx, filepath.Join(tmp, indexFilename)) + indexw, err := index.NewWriterWithEncoder(c.ctx, filepath.Join(tmp, indexFilename), c.postingsEncoder) if err != nil { - return errors.Wrap(err, "open index writer") + return fmt.Errorf("open index writer: %w", err) } closers = append(closers, indexw) if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { - return errors.Wrap(err, "populate block") + return fmt.Errorf("populate block: %w", err) } select { @@ -634,17 +673,17 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } if _, err = writeMetaFile(c.logger, tmp, meta); err != nil { - return errors.Wrap(err, "write merged meta") + return fmt.Errorf("write merged meta: %w", err) } // Create an empty tombstones file. if _, err := tombstones.WriteFile(c.logger, tmp, tombstones.NewMemTombstones()); err != nil { - return errors.Wrap(err, "write new tombstones file") + return fmt.Errorf("write new tombstones file: %w", err) } df, err := fileutil.OpenDir(tmp) if err != nil { - return errors.Wrap(err, "open temporary block dir") + return fmt.Errorf("open temporary block dir: %w", err) } defer func() { if df != nil { @@ -653,18 +692,18 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl }() if err := df.Sync(); err != nil { - return errors.Wrap(err, "sync temporary dir file") + return fmt.Errorf("sync temporary dir file: %w", err) } // Close temp dir before rename block dir (for windows platform). if err = df.Close(); err != nil { - return errors.Wrap(err, "close temporary dir") + return fmt.Errorf("close temporary dir: %w", err) } df = nil // Block successfully written, make it visible in destination dir by moving it from tmp one. 
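The write path above collects every writer it opens into a closers slice and merges any close failure into the returned error via the TSDB multi-error helper. With the standard library alone, the same shape can be sketched with errors.Join (Go 1.20+); this is an illustration of the pattern, not the vendored tsdb_errors API:

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    	"strings"
    )

    // closeAll closes every closer and joins the failures into one error,
    // mirroring the defer-and-merge shape used by the compactor's write path.
    func closeAll(closers []io.Closer) error {
    	var errs []error
    	for _, c := range closers {
    		if err := c.Close(); err != nil {
    			errs = append(errs, err)
    		}
    	}
    	return errors.Join(errs...) // nil when errs is empty
    }

    func main() {
    	closers := []io.Closer{
    		io.NopCloser(strings.NewReader("index")),
    		io.NopCloser(strings.NewReader("chunks")),
    	}
    	fmt.Println(closeAll(closers)) // <nil>: NopCloser never fails
    }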
if err := fileutil.Replace(tmp, dir); err != nil { - return errors.Wrap(err, "rename block dir") + return fmt.Errorf("rename block dir: %w", err) } return nil @@ -693,7 +732,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa defer func() { errs := tsdb_errors.NewMulti(err) if cerr := tsdb_errors.CloseAll(closers); cerr != nil { - errs.Add(errors.Wrap(cerr, "close")) + errs.Add(fmt.Errorf("close: %w", cerr)) } err = errs.Err() metrics.PopulatingBlocks.Set(0) @@ -721,19 +760,19 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa indexr, err := b.Index() if err != nil { - return errors.Wrapf(err, "open index reader for block %+v", b.Meta()) + return fmt.Errorf("open index reader for block %+v: %w", b.Meta(), err) } closers = append(closers, indexr) chunkr, err := b.Chunks() if err != nil { - return errors.Wrapf(err, "open chunk reader for block %+v", b.Meta()) + return fmt.Errorf("open chunk reader for block %+v: %w", b.Meta(), err) } closers = append(closers, chunkr) tombsr, err := b.Tombstones() if err != nil { - return errors.Wrapf(err, "open tombstone reader for block %+v", b.Meta()) + return fmt.Errorf("open tombstone reader for block %+v: %w", b.Meta(), err) } closers = append(closers, tombsr) @@ -755,11 +794,11 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa for symbols.Next() { if err := indexw.AddSymbol(symbols.At()); err != nil { - return errors.Wrap(err, "add symbol") + return fmt.Errorf("add symbol: %w", err) } } - if symbols.Err() != nil { - return errors.Wrap(symbols.Err(), "next symbol") + if err := symbols.Err(); err != nil { + return fmt.Errorf("next symbol: %w", err) } var ( @@ -791,8 +830,8 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa // chunk file purposes. chks = append(chks, chksIter.At()) } - if chksIter.Err() != nil { - return errors.Wrap(chksIter.Err(), "chunk iter") + if err := chksIter.Err(); err != nil { + return fmt.Errorf("chunk iter: %w", err) } // Skip the series with all deleted chunks. 
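PopulateBlock above also tightens the iterator checks: symbols.Err() and the other iterator errors are now read once into a local before wrapping, instead of being evaluated twice. The Next/At/Err idiom those call sites rely on is sketched below with a toy iterator: Next advances until exhaustion, then Err distinguishes clean exhaustion from failure.

    package main

    import "fmt"

    // sliceIter is a toy iterator with the Next/At/Err shape the TSDB uses.
    type sliceIter struct {
    	items []string
    	pos   int
    	err   error
    }

    func (it *sliceIter) Next() bool {
    	if it.err != nil || it.pos >= len(it.items) {
    		return false
    	}
    	it.pos++
    	return true
    }

    func (it *sliceIter) At() string { return it.items[it.pos-1] }
    func (it *sliceIter) Err() error { return it.err }

    func main() {
    	it := &sliceIter{items: []string{"a", "b"}}
    	for it.Next() {
    		fmt.Println(it.At())
    	}
    	// Read Err once into a local, then wrap: the pattern the patch adopts.
    	if err := it.Err(); err != nil {
    		fmt.Println(fmt.Errorf("next symbol: %w", err))
    	}
    }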
@@ -801,10 +840,10 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa } if err := chunkw.WriteChunks(chks...); err != nil { - return errors.Wrap(err, "write chunks") + return fmt.Errorf("write chunks: %w", err) } if err := indexw.AddSeries(ref, s.Labels(), chks...); err != nil { - return errors.Wrap(err, "add series") + return fmt.Errorf("add series: %w", err) } meta.Stats.NumChunks += uint64(len(chks)) @@ -815,13 +854,13 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa for _, chk := range chks { if err := chunkPool.Put(chk.Chunk); err != nil { - return errors.Wrap(err, "put chunk") + return fmt.Errorf("put chunk: %w", err) } } ref++ } - if set.Err() != nil { - return errors.Wrap(set.Err(), "iterate compaction set") + if err := set.Err(); err != nil { + return fmt.Errorf("iterate compaction set: %w", err) } return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 2e3801a9e0..bdeb3faee8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -16,6 +16,7 @@ package tsdb import ( "context" + "errors" "fmt" "io" "io/fs" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -70,20 +70,19 @@ var ErrNotReady = errors.New("TSDB not ready") // millisecond precision timestamps. func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wlog.DefaultSegmentSize, - MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, - RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), - MinBlockDuration: DefaultBlockDuration, - MaxBlockDuration: DefaultBlockDuration, - NoLockfile: false, - AllowOverlappingCompaction: true, - SamplesPerChunk: DefaultSamplesPerChunk, - WALCompression: wlog.CompressionNone, - StripeSize: DefaultStripeSize, - HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, - IsolationDisabled: defaultIsolationDisabled, - HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, - OutOfOrderCapMax: DefaultOutOfOrderCapMax, + WALSegmentSize: wlog.DefaultSegmentSize, + MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, + RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), + MinBlockDuration: DefaultBlockDuration, + MaxBlockDuration: DefaultBlockDuration, + NoLockfile: false, + SamplesPerChunk: DefaultSamplesPerChunk, + WALCompression: wlog.CompressionNone, + StripeSize: DefaultStripeSize, + HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, + IsolationDisabled: defaultIsolationDisabled, + HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, + OutOfOrderCapMax: DefaultOutOfOrderCapMax, } } @@ -115,14 +114,6 @@ type Options struct { // NoLockfile disables creation and consideration of a lock file. NoLockfile bool - // Compaction of overlapping blocks are allowed if AllowOverlappingCompaction is true. - // This is an optional flag for overlapping blocks. - // The reason why this flag exists is because there are various users of the TSDB - // that do not want vertical compaction happening on ingest time. Instead, - // they'd rather keep overlapping blocks and let another component do the overlapping compaction later. - // For Prometheus, this will always be true. 
- AllowOverlappingCompaction bool - // WALCompression configures the compression type to use on records in the WAL. WALCompression wlog.CompressionType @@ -386,7 +377,7 @@ type DBReadOnly struct { // OpenDBReadOnly opens DB in the given directory for read only operations. func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { - return nil, errors.Wrap(err, "opening the db dir") + return nil, fmt.Errorf("opening the db dir: %w", err) } if l == nil { @@ -407,7 +398,7 @@ func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { blockReaders, err := db.Blocks() if err != nil { - return errors.Wrap(err, "read blocks") + return fmt.Errorf("read blocks: %w", err) } maxBlockTime := int64(math.MinInt64) if len(blockReaders) > 0 { @@ -432,15 +423,16 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { return err } defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(head.Close(), "closing Head"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := head.Close(); err != nil { + errs.Add(fmt.Errorf("closing Head: %w", err)) + } + returnErr = errs.Err() }() // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return errors.Wrap(err, "read WAL") + return fmt.Errorf("read WAL: %w", err) } mint := head.MinTime() maxt := head.MaxTime() @@ -450,16 +442,18 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { nil, db.logger, ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5), - chunkenc.NewPool(), - nil, + chunkenc.NewPool(), nil, ) if err != nil { - return errors.Wrap(err, "create leveled compactor") + return fmt.Errorf("create leveled compactor: %w", err) } // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. _, err = compactor.Write(dir, rh, mint, maxt+1, nil) - return errors.Wrap(err, "writing WAL") + if err != nil { + return fmt.Errorf("writing WAL: %w", err) + } + return nil } func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) { @@ -518,7 +512,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return nil, errors.Wrap(err, "read WAL") + return nil, fmt.Errorf("read WAL: %w", err) } // Set the wal to nil to disable all wal operations. // This is mainly to avoid blocking when closing the head. @@ -580,7 +574,9 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + if err != nil { + errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err)) + } } return nil, errs.Err() } @@ -761,7 +757,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Fixup bad format written by Prometheus 2.1. 
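The "+1 millisecond" in the compactor.Write calls above follows from block intervals being half-open, [MinTime, MaxTime): to persist every sample up to and including maxt, the block's upper bound must be maxt+1. A quick arithmetic check:

    package main

    import "fmt"

    // inBlock reports whether a sample timestamp falls in a half-open
    // block interval [mint, maxt), the convention TSDB blocks use.
    func inBlock(ts, mint, maxt int64) bool {
    	return ts >= mint && ts < maxt
    }

    func main() {
    	mint, maxt := int64(1000), int64(2000) // head covers samples at 1000..2000
    	fmt.Println(inBlock(2000, mint, maxt))   // false: 2000 is excluded by [mint, maxt)
    	fmt.Println(inBlock(2000, mint, maxt+1)) // true: hence Write(..., mint, maxt+1, ...)
    }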
if err := repairBadIndexVersion(l, dir); err != nil { - return nil, errors.Wrap(err, "repair bad index version") + return nil, fmt.Errorf("repair bad index version: %w", err) } walDir := filepath.Join(dir, "wal") @@ -769,12 +765,12 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { - return nil, errors.Wrap(err, "migrate WAL") + return nil, fmt.Errorf("migrate WAL: %w", err) } for _, tmpDir := range []string{walDir, dir} { // Remove tmp dirs. if err := removeBestEffortTmpDirs(l, tmpDir); err != nil { - return nil, errors.Wrap(err, "remove tmp dirs") + return nil, fmt.Errorf("remove tmp dirs: %w", err) } } @@ -797,11 +793,11 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } close(db.donec) // DB is never run if it was an error, so close this channel here. - - returnedErr = tsdb_errors.NewMulti( - returnedErr, - errors.Wrap(db.Close(), "close DB after failed startup"), - ).Err() + errs := tsdb_errors.NewMulti(returnedErr) + if err := db.Close(); err != nil { + errs.Add(fmt.Errorf("close DB after failed startup: %w", err)) + } + returnedErr = errs.Err() }() if db.blocksToDelete == nil { @@ -823,7 +819,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil) if err != nil { cancel() - return nil, errors.Wrap(err, "create leveled compactor") + return nil, fmt.Errorf("create leveled compactor: %w", err) } db.compactCancel = cancel @@ -905,17 +901,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if initErr := db.head.Init(minValidTime); initErr != nil { db.head.metrics.walCorruptionsTotal.Inc() - e, ok := initErr.(*errLoadWbl) - if ok { + var e *errLoadWbl + if errors.As(initErr, &e) { level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { - return nil, errors.Wrap(err, "repair corrupted WBL") + return nil, fmt.Errorf("repair corrupted WBL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WBL") } else { level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + return nil, fmt.Errorf("repair corrupted WAL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WAL") } @@ -1131,10 +1127,11 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { lastBlockMaxt := int64(math.MinInt64) defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(db.head.truncateWAL(lastBlockMaxt), "WAL truncation in Compact defer"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := db.head.truncateWAL(lastBlockMaxt); err != nil { + errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err)) + } + returnErr = errs.Err() }() start := time.Now() @@ -1168,7 +1165,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { db.head.WaitForAppendersOverlapping(rh.MaxTime()) if err := db.compactHead(rh); err != nil { - return errors.Wrap(err, "compact head") + return fmt.Errorf("compact head: %w", err) } // Consider only successful compactions for WAL truncation. 
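The open path above uses a named return plus a deferred close so that a failed startup still releases resources, with any close failure folded into the error the caller sees. Stripped to its shape, as a stdlib-only sketch:

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    // openDir opens a directory handle; on any later failure the deferred
    // cleanup closes it and folds the close error into the named return,
    // the same shape as open()'s "close DB after failed startup" defer.
    func openDir(path string) (returnedErr error) {
    	f, err := os.Open(path)
    	if err != nil {
    		return fmt.Errorf("open dir: %w", err)
    	}
    	defer func() {
    		if returnedErr == nil {
    			return // success: the handle stays open for later use (elided here)
    		}
    		if cerr := f.Close(); cerr != nil {
    			returnedErr = errors.Join(returnedErr, fmt.Errorf("close after failed startup: %w", cerr))
    		}
    	}()
    	// ... further startup work that may fail ...
    	return nil
    }

    func main() {
    	fmt.Println(openDir(os.TempDir())) // <nil>
    }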
lastBlockMaxt = maxt @@ -1177,7 +1174,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { // Clear some disk space before compacting blocks, especially important // when Head compaction happened over a long time range. if err := db.head.truncateWAL(lastBlockMaxt); err != nil { - return errors.Wrap(err, "WAL truncation in Compact") + return fmt.Errorf("WAL truncation in Compact: %w", err) } compactionDuration := time.Since(start) @@ -1192,7 +1189,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { if lastBlockMaxt != math.MinInt64 { // The head was compacted, so we compact OOO head as well. if err := db.compactOOOHead(ctx); err != nil { - return errors.Wrap(err, "compact ooo head") + return fmt.Errorf("compact ooo head: %w", err) } } @@ -1205,11 +1202,11 @@ func (db *DB) CompactHead(head *RangeHead) error { defer db.cmtx.Unlock() if err := db.compactHead(head); err != nil { - return errors.Wrap(err, "compact head") + return fmt.Errorf("compact head: %w", err) } if err := db.head.truncateWAL(head.BlockMaxTime()); err != nil { - return errors.Wrap(err, "WAL truncation") + return fmt.Errorf("WAL truncation: %w", err) } return nil } @@ -1228,12 +1225,12 @@ func (db *DB) compactOOOHead(ctx context.Context) error { } oooHead, err := NewOOOCompactionHead(ctx, db.head) if err != nil { - return errors.Wrap(err, "get ooo compaction head") + return fmt.Errorf("get ooo compaction head: %w", err) } ulids, err := db.compactOOO(db.dir, oooHead) if err != nil { - return errors.Wrap(err, "compact ooo head") + return fmt.Errorf("compact ooo head: %w", err) } if err := db.reloadBlocks(); err != nil { errs := tsdb_errors.NewMulti(err) @@ -1242,7 +1239,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { errs.Add(errRemoveAll) } } - return errors.Wrap(errs.Err(), "reloadBlocks blocks after failed compact ooo head") + return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err()) } lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() @@ -1262,7 +1259,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { } if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { - return errors.Wrap(err, "truncate ooo wbl") + return fmt.Errorf("truncate ooo wbl: %w", err) } } @@ -1298,12 +1295,12 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID blockDir := filepath.Join(dest, uid.String()) meta, _, err := readMetaFile(blockDir) if err != nil { - return ulids, errors.Wrap(err, "read meta") + return ulids, fmt.Errorf("read meta: %w", err) } meta.Compaction.SetOutOfOrder() _, err = writeMetaFile(db.logger, blockDir, meta) if err != nil { - return ulids, errors.Wrap(err, "write meta") + return ulids, fmt.Errorf("write meta: %w", err) } } } @@ -1329,20 +1326,20 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID func (db *DB) compactHead(head *RangeHead) error { uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { - return errors.Wrap(err, "persist head block") + return fmt.Errorf("persist head block: %w", err) } if err := db.reloadBlocks(); err != nil { if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { return tsdb_errors.NewMulti( - errors.Wrap(err, "reloadBlocks blocks"), - errors.Wrapf(errRemoveAll, "delete persisted head block after failed db reloadBlocks:%s", uid), + fmt.Errorf("reloadBlocks blocks: %w", err), + fmt.Errorf("delete persisted head block after failed 
db reloadBlocks:%s: %w", uid, errRemoveAll), ).Err() } - return errors.Wrap(err, "reloadBlocks blocks") + return fmt.Errorf("reloadBlocks blocks: %w", err) } if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { - return errors.Wrap(err, "head memory truncate") + return fmt.Errorf("head memory truncate: %w", err) } return nil } @@ -1354,7 +1351,7 @@ func (db *DB) compactBlocks() (err error) { for { plan, err := db.compactor.Plan(db.dir) if err != nil { - return errors.Wrap(err, "plan compaction") + return fmt.Errorf("plan compaction: %w", err) } if len(plan) == 0 { break @@ -1368,14 +1365,14 @@ func (db *DB) compactBlocks() (err error) { uid, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { - return errors.Wrapf(err, "compact %s", plan) + return fmt.Errorf("compact %s: %w", plan, err) } if err := db.reloadBlocks(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { - return errors.Wrapf(err, "delete compacted block after failed db reloadBlocks:%s", uid) + return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err) } - return errors.Wrap(err, "reloadBlocks blocks") + return fmt.Errorf("reloadBlocks blocks: %w", err) } } @@ -1396,14 +1393,14 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) { // reload reloads blocks and truncates the head and its WAL. func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { - return errors.Wrap(err, "reloadBlocks") + return fmt.Errorf("reloadBlocks: %w", err) } maxt, ok := db.inOrderBlocksMaxTime() if !ok { return nil } if err := db.head.Truncate(maxt); err != nil { - return errors.Wrap(err, "head truncate") + return fmt.Errorf("head truncate: %w", err) } return nil } @@ -1457,7 +1454,9 @@ func (db *DB) reloadBlocks() (err error) { } errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + if err != nil { + errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err)) + } } return errs.Err() } @@ -1509,7 +1508,7 @@ func (db *DB) reloadBlocks() (err error) { } } if err := db.deleteBlocks(deletable); err != nil { - return errors.Wrapf(err, "delete %v blocks", len(deletable)) + return fmt.Errorf("delete %v blocks: %w", len(deletable), err) } return nil } @@ -1517,7 +1516,7 @@ func (db *DB) reloadBlocks() (err error) { func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { - return nil, nil, errors.Wrap(err, "find blocks") + return nil, nil, fmt.Errorf("find blocks: %w", err) } corrupted = make(map[ulid.ULID]error) @@ -1651,16 +1650,16 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { // Noop. continue case err != nil: - return errors.Wrapf(err, "stat dir %v", toDelete) + return fmt.Errorf("stat dir %v: %w", toDelete, err) } // Replace atomically to avoid partial block when process would crash during deletion. 
tmpToDelete := filepath.Join(db.dir, fmt.Sprintf("%s%s", ulid, tmpForDeletionBlockDirSuffix)) if err := fileutil.Replace(toDelete, tmpToDelete); err != nil { - return errors.Wrapf(err, "replace of obsolete block for deletion %s", ulid) + return fmt.Errorf("replace of obsolete block for deletion %s: %w", ulid, err) } if err := os.RemoveAll(tmpToDelete); err != nil { - return errors.Wrapf(err, "delete obsolete block %s", ulid) + return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) } @@ -1868,7 +1867,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { - return errors.Wrapf(err, "error snapshotting block: %s", b.Dir()) + return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) } } if !withHead { @@ -1881,7 +1880,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil { - return errors.Wrap(err, "snapshot head block") + return fmt.Errorf("snapshot head block: %w", err) } return nil } @@ -1916,7 +1915,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { var err error inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head %s", rh) + return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -1925,7 +1924,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head block querier %s", rh) + return nil, fmt.Errorf("closing head block querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -1933,7 +1932,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } } @@ -1950,7 +1949,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. 
rh.isoState.Close() - return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) + return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -1959,7 +1958,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -1997,7 +1996,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, mint, maxt) inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head %s", rh) + return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -2006,7 +2005,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head querier %s", rh) + return nil, fmt.Errorf("closing head querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -2014,7 +2013,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } } @@ -2027,7 +2026,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) + return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -2036,7 +2035,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -2105,7 +2104,7 @@ func (db *DB) CleanTombstones() (err error) { for _, pb := range db.Blocks() { uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) if cleanErr != nil { - return errors.Wrapf(cleanErr, "clean tombstones: %s", pb.Dir()) + return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) } if !safeToDelete { // There was nothing to clean. 
@@ -2133,7 +2132,10 @@ func (db *DB) CleanTombstones() (err error) { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } - return errors.Wrap(err, "reload blocks") + if err != nil { + return fmt.Errorf("reload blocks: %w", err) + } + return nil } } return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go index 8eaf42653c..805de70da3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "sync" "unicode/utf8" @@ -363,7 +364,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp err := ce.validateExemplar(seriesLabels, e, true) if err != nil { - if err == storage.ErrDuplicateExemplar { + if errors.Is(err, storage.ErrDuplicateExemplar) { // Duplicate exemplar, noop. return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 3ff2bee716..63d8e9ea13 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "fmt" "io" "math" @@ -27,7 +28,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "go.uber.org/atomic" "github.com/prometheus/client_golang/prometheus" @@ -149,6 +149,10 @@ type HeadOptions struct { // EnableNativeHistograms enables the ingestion of native histograms. EnableNativeHistograms atomic.Bool + // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool + ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string @@ -619,11 +623,11 @@ func (h *Head) Init(minValidTime int64) error { if h.wal != nil { _, endAt, err := wlog.Segments(h.wal.Dir()) if err != nil { - return errors.Wrap(err, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", err) } _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) - if err != nil && err != record.ErrNotFound { + if err != nil && !errors.Is(err, record.ErrNotFound) { level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) } @@ -670,7 +674,8 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) - if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() } @@ -697,14 +702,14 @@ func (h *Head) Init(minValidTime int64) error { checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir()) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "find last checkpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("find last checkpoint: %w", err) } // Find the last segment. 
_, endAt, e := wlog.Segments(h.wal.Dir()) if e != nil { - return errors.Wrap(e, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", e) } h.startWALReplayStatus(startFrom, endAt) @@ -713,7 +718,7 @@ func (h *Head) Init(minValidTime int64) error { if err == nil && startFrom >= snapIdx { sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return errors.Wrap(err, "open checkpoint") + return fmt.Errorf("open checkpoint: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -724,7 +729,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. if err := h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { - return errors.Wrap(err, "backfill checkpoint") + return fmt.Errorf("backfill checkpoint: %w", err) } h.updateWALReplayStatusRead(startFrom) startFrom++ @@ -741,7 +746,7 @@ func (h *Head) Init(minValidTime int64) error { for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i)) if err != nil { - return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + return fmt.Errorf("open WAL segment: %d: %w", i, err) } offset := 0 @@ -754,7 +759,7 @@ func (h *Head) Init(minValidTime int64) error { continue } if err != nil { - return errors.Wrapf(err, "segment reader (offset=%d)", offset) + return fmt.Errorf("segment reader (offset=%d): %w", offset, err) } err = h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { @@ -773,14 +778,14 @@ func (h *Head) Init(minValidTime int64) error { // Replay WBL. startFrom, endAt, e = wlog.Segments(h.wbl.Dir()) if e != nil { - return &errLoadWbl{errors.Wrap(e, "finding WBL segments")} + return &errLoadWbl{fmt.Errorf("finding WBL segments: %w", e)} } h.startWALReplayStatus(startFrom, endAt) for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i)) if err != nil { - return &errLoadWbl{errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))} + return &errLoadWbl{fmt.Errorf("open WBL segment: %d: %w", i, err)} } sr := wlog.NewSegmentBufReader(s) @@ -901,7 +906,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) return nil }); err != nil { // secondLastRef because the lastRef caused an error. - return nil, nil, secondLastRef, errors.Wrap(err, "iterate on on-disk chunks") + return nil, nil, secondLastRef, fmt.Errorf("iterate on on-disk chunks: %w", err) } return mmappedChunks, oooMmappedChunks, lastRef, nil } @@ -1220,12 +1225,12 @@ func (h *Head) truncateWAL(mint int64) error { first, last, err := wlog.Segments(h.wal.Dir()) if err != nil { - return errors.Wrap(err, "get segment range") + return fmt.Errorf("get segment range: %w", err) } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. if _, err := h.wal.NextSegment(); err != nil { - return errors.Wrap(err, "next segment") + return fmt.Errorf("next segment: %w", err) } last-- // Never consider last segment for checkpoint. 
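The truncation bookkeeping above first forces a new WAL segment, then excludes the still-live last segment from checkpointing (last--) and bails out when nothing older remains. That guard arithmetic, in isolation (a hypothetical helper, not an API of the wlog package):

    package main

    import "fmt"

    // checkpointRange returns the inclusive segment range to checkpoint, given
    // the first and last existing WAL segments, and ok=false when there is
    // nothing safe to checkpoint. The last segment is always left alone
    // because it may still be written to.
    func checkpointRange(first, last int) (from, to int, ok bool) {
    	last-- // never consider the last segment for checkpointing
    	if last < 0 || first > last {
    		return 0, 0, false
    	}
    	return first, last, true
    }

    func main() {
    	fmt.Println(checkpointRange(0, 0)) // 0 0 false: only the live segment exists
    	fmt.Println(checkpointRange(0, 3)) // 0 2 true: segments 0..2 can be checkpointed
    }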
if last < 0 { @@ -1252,10 +1257,11 @@ func (h *Head) truncateWAL(mint int64) error { h.metrics.checkpointCreationTotal.Inc() if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { h.metrics.checkpointCreationFail.Inc() - if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok { + var cerr *wlog.CorruptionErr + if errors.As(err, &cerr) { h.metrics.walCorruptionsTotal.Inc() } - return errors.Wrap(err, "create checkpoint") + return fmt.Errorf("create checkpoint: %w", err) } if err := h.wal.Truncate(last + 1); err != nil { // If truncating fails, we'll just try again at the next checkpoint. @@ -1348,7 +1354,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { // Truncate the chunk m-mapper. if err := h.chunkDiskMapper.Truncate(uint32(minMmapFile)); err != nil { - return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number") + return fmt.Errorf("truncate chunks.HeadReadWriter by file number: %w", err) } return nil } @@ -1463,13 +1469,13 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match p, err := PostingsForMatchers(ctx, ir, ms...) if err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } var stones []tombstones.Stone for p.Next() { if err := ctx.Err(); err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } series := h.series.getByID(chunks.HeadSeriesRef(p.At())) @@ -1491,8 +1497,8 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match if p.Err() != nil { return p.Err() } - if ctx.Err() != nil { - return errors.Wrap(err, "select series") + if err := ctx.Err(); err != nil { + return fmt.Errorf("select series: %w", err) } if h.wal != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index be53a4f3f6..f112ffa3ae 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -15,11 +15,11 @@ package tsdb import ( "context" + "errors" "fmt" "math" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -87,6 +87,17 @@ func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m return a.app.UpdateMetadata(ref, l, m) } +func (a *initAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendCTZeroSample(ref, lset, t, ct) + } + + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendCTZeroSample(ref, lset, t, ct) +} + // initTime initializes a head with the first timestamp. This only needs to be called // for a completely fresh head with an empty WAL. func (h *Head) initTime(t int64) { @@ -319,28 +330,11 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { - // Ensure no empty labels have gotten through.
- lset = lset.WithoutEmpty() - if lset.IsEmpty() { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") - } - - if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) - } - - var created bool var err error - s, created, err = a.head.getOrCreate(lset.Hash(), lset) + s, err = a.getOrCreate(lset) if err != nil { return 0, err } - if created { - a.series = append(a.series, record.RefSeries{ - Ref: s.ref, - Labels: lset, - }) - } } if value.IsStaleNaN(v) { @@ -364,10 +358,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } if err != nil { - switch err { - case storage.ErrOutOfOrderSample: + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() - case storage.ErrTooOldSample: + case errors.Is(err, storage.ErrTooOldSample): a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc() } return 0, err @@ -389,6 +383,71 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 return storage.SeriesRef(s.ref), nil } +// AppendCTZeroSample appends synthetic zero sample for ct timestamp. It returns +// error when sample can't be appended. See +// storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. +func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if ct >= t { + return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + } + + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + var err error + s, err = a.getOrCreate(lset) + if err != nil { + return 0, err + } + } + + // Check if CT wouldn't be OOO vs samples we already might have for this series. + // NOTE(bwplotka): This will be often hit as it's expected for long living + // counters to share the same CT. + s.Lock() + isOOO, _, err := s.appendable(ct, 0, a.headMaxt, a.minValidTime, a.oooTimeWindow) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if err != nil { + return 0, err + } + if isOOO { + return storage.SeriesRef(s.ref), storage.ErrOutOfOrderCT + } + + if ct > a.maxt { + a.maxt = ct + } + a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0}) + a.sampleSeries = append(a.sampleSeries, s) + return storage.SeriesRef(s.ref), nil +} + +func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) { + // Ensure no empty labels have gotten through. + lset = lset.WithoutEmpty() + if lset.IsEmpty() { + return nil, fmt.Errorf("empty labelset: %w", ErrInvalidSample) + } + if l, dup := lset.HasDuplicateLabelNames(); dup { + return nil, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) + } + var created bool + var err error + s, created, err := a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return nil, err + } + if created { + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + return s, nil +} + // appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) // The sample belongs to the out of order chunk if we return true and no error. // An error signifies the sample cannot be handled. 
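The hunks above replace direct sentinel comparisons such as err == storage.ErrOutOfOrderSample with errors.Is, which is required once errors are wrapped with fmt.Errorf's %w verb, as the new getOrCreate helper does with ErrInvalidSample. A minimal, self-contained sketch of the pattern follows; the sentinel and function names are hypothetical stand-ins, not part of this patch.

package main

import (
	"errors"
	"fmt"
)

// errOutOfOrder is a hypothetical sentinel, standing in for sentinels like
// storage.ErrOutOfOrderSample in the hunks above.
var errOutOfOrder = errors.New("out of order sample")

// appendSample wraps the sentinel with %w, keeping it reachable in the chain.
func appendSample(t int64) error {
	if t < 100 {
		return fmt.Errorf("append sample at t=%d: %w", t, errOutOfOrder)
	}
	return nil
}

func main() {
	err := appendSample(42)
	fmt.Println(err == errOutOfOrder)          // false: wrapping broke direct equality
	fmt.Println(errors.Is(err, errOutOfOrder)) // true: errors.Is walks the wrap chain
}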
@@ -498,7 +557,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, err := a.head.exemplars.ValidateExemplar(s.lset, e) if err != nil { - if err == storage.ErrDuplicateExemplar || err == storage.ErrExemplarsDisabled { + if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) { // Duplicate, don't return an error but don't accept the exemplar. return 0, nil } @@ -537,11 +596,11 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // Ensure no empty labels have gotten through. lset = lset.WithoutEmpty() if lset.IsEmpty() { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") + return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) } if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) } var created bool @@ -569,7 +628,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableHistogram(t, h); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -586,7 +645,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableFloatHistogram(t, fh); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -670,7 +729,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } } if len(a.metadata) > 0 { @@ -678,7 +737,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log metadata") + return fmt.Errorf("log metadata: %w", err) } } if len(a.samples) > 0 { @@ -686,21 +745,21 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log samples") + return fmt.Errorf("log samples: %w", err) } } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log histograms") + return fmt.Errorf("log histograms: %w", err) } } if len(a.floatHistograms) > 0 { rec = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log float histograms") + return fmt.Errorf("log float histograms: %w", err) } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -712,7 +771,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } } return nil @@ -741,7 +800,7 @@ func (a *headAppender) Commit() (err error) { if err := a.log(); err != nil { _ = a.Rollback() // Most likely the same error will happen again. 
- return errors.Wrap(err, "write to WAL") + return fmt.Errorf("write to WAL: %w", err) } if a.head.writeNotified != nil { @@ -759,7 +818,7 @@ func (a *headAppender) Commit() (err error) { } // We don't instrument exemplar appends here, all is instrumented by storage. if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { - if err == storage.ErrOutOfOrderExemplar { + if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) @@ -839,16 +898,16 @@ func (a *headAppender) Commit() (err error) { series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch err { - case nil: + switch { + case err == nil: // Do nothing. - case storage.ErrOutOfOrderSample: + case errors.Is(err, storage.ErrOutOfOrderSample): samplesAppended-- oooRejected++ - case storage.ErrOutOfBounds: + case errors.Is(err, storage.ErrOutOfBounds): samplesAppended-- oobRejected++ - case storage.ErrTooOldSample: + case errors.Is(err, storage.ErrTooOldSample): samplesAppended-- tooOldRejected++ default: @@ -979,7 +1038,7 @@ func (a *headAppender) Commit() (err error) { for i, m := range a.metadata { series = a.metadataSeries[i] series.Lock() - series.meta = &metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help} + series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.Unlock() } @@ -1428,7 +1487,7 @@ func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count i } func handleChunkWriteError(err error) { - if err != nil && err != chunks.ErrChunkDiskMapperClosed { + if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) { panic(err) } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index 35ef26a58a..362764480b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -15,11 +15,12 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "sync" "github.com/go-kit/log/level" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -133,7 +134,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { } } if err := p.Err(); err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) + return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } slices.SortFunc(series, func(a, b *memSeries) int { @@ -388,7 +389,8 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi if ix < len(s.mmappedChunks) { chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { panic(err) } return nil, false, false, err @@ -516,14 +518,15 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime) } if err != nil { - return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") + return nil, fmt.Errorf("failed to convert ooo head chunk to xor chunk: %w", err) } iterable = xor } else { chk, err := cdm.Chunk(c.ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { - return nil, errors.Wrap(err, "invalid ooo mmapped chunk") + var 
cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { + return nil, fmt.Errorf("invalid ooo mmapped chunk: %w", err) } return nil, err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 07fa8280ca..1be65f1341 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -14,6 +14,7 @@ package tsdb import ( + "errors" "fmt" "math" "os" @@ -24,7 +25,6 @@ import ( "time" "github.com/go-kit/log/level" - "github.com/pkg/errors" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +128,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. // At the moment the only possible error here is out of order exemplars, which we shouldn't see when // replaying the WAL, so lets just log the error if it's not that type. err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) - if err != nil && err == storage.ErrOutOfOrderExemplar { + if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) } } @@ -145,7 +145,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. series, err = dec.Series(rec, series) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), + Err: fmt.Errorf("decode series: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -157,7 +157,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -169,7 +169,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. tstones, err = dec.Tombstones(rec, tstones) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode tombstones"), + Err: fmt.Errorf("decode tombstones: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -181,7 +181,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode exemplars"), + Err: fmt.Errorf("decode exemplars: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -193,7 +193,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode histograms"), + Err: fmt.Errorf("decode histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -205,7 +205,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode float histograms"), + Err: fmt.Errorf("decode float histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -217,7 +217,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
meta, err := dec.Metadata(rec, meta) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode metadata"), + Err: fmt.Errorf("decode metadata: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -388,7 +388,7 @@ Outer: continue } s.meta = &metadata.Metadata{ - Type: record.ToTextparseMetricType(m.Type), + Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help, } @@ -416,8 +416,8 @@ Outer: close(exemplarsInput) wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { @@ -708,7 +708,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -720,7 +720,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode mmap markers"), + Err: fmt.Errorf("decode mmap markers: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -806,8 +806,8 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. } wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { @@ -995,7 +995,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh chk, err := chunkenc.FromData(enc, chunkBytesCopy) if err != nil { - return csr, errors.Wrap(err, "chunk from data") + return csr, fmt.Errorf("chunk from data: %w", err) } csr.mc.chunk = chk @@ -1030,7 +1030,7 @@ func encodeTombstonesToSnapshotRecord(tr tombstones.Reader) ([]byte, error) { buf.PutByte(chunkSnapshotRecordTypeTombstones) b, err := tombstones.Encode(tr) if err != nil { - return nil, errors.Wrap(err, "encode tombstones") + return nil, fmt.Errorf("encode tombstones: %w", err) } buf.PutUvarintBytes(b) @@ -1045,7 +1045,10 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) { } tr, err := tombstones.Decode(dec.UvarintBytes()) - return tr, errors.Wrap(err, "decode tombstones") + if err != nil { + return tr, fmt.Errorf("decode tombstones: %w", err) + } + return tr, nil } const chunkSnapshotPrefix = "chunk_snapshot." 
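The decode errors above are wrapped into &wlog.CorruptionErr{Err: fmt.Errorf(...)}, and checks elsewhere in the patch move from errors.Cause(err).(*wlog.CorruptionErr) to errors.As; that only works because CorruptionErr (and the old walCorruptionErr, further down) gain Unwrap methods. A sketch of the mechanics with a stand-in type, using nothing beyond the standard library:

package main

import (
	"errors"
	"fmt"
)

// corruptionErr mimics the shape of wlog.CorruptionErr: a typed error that
// carries context and wraps an underlying cause.
type corruptionErr struct {
	segment int
	err     error
}

func (e *corruptionErr) Error() string {
	return fmt.Sprintf("corruption in segment %d: %s", e.segment, e.err)
}

// Unwrap exposes the cause so errors.Is and errors.As can see through it.
func (e *corruptionErr) Unwrap() error { return e.err }

func main() {
	// Two layers of wrapping, as in "backfill checkpoint: ..." above.
	err := fmt.Errorf("backfill checkpoint: %w",
		&corruptionErr{segment: 3, err: errors.New("bad record")})

	var cerr *corruptionErr
	if errors.As(err, &cerr) { // finds the *corruptionErr inside the chain
		fmt.Println("corrupted segment:", cerr.segment) // corrupted segment: 3
	}
}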
@@ -1072,13 +1075,13 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats := &ChunkSnapshotStats{} wlast, woffset, err := h.wal.LastSegmentAndOffset() - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "get last wal segment and offset") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("get last wal segment and offset: %w", err) } _, cslast, csoffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "find last chunk snapshot") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("find last chunk snapshot: %w", err) } if wlast == cslast && woffset == csoffset { @@ -1093,11 +1096,11 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats.Dir = cpdir if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { - return stats, errors.Wrap(err, "create chunk snapshot dir") + return stats, fmt.Errorf("create chunk snapshot dir: %w", err) } cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType()) if err != nil { - return stats, errors.Wrap(err, "open chunk snapshot") + return stats, fmt.Errorf("open chunk snapshot: %w", err) } // Ensures that an early return caused by an error doesn't leave any tmp files. @@ -1126,7 +1129,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if len(buf) > 10*1024*1024 { if err := cp.Log(recs...); err != nil { h.series.locks[i].RUnlock() - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf, recs = buf[:0], recs[:0] } @@ -1139,16 +1142,16 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Add tombstones to the snapshot. tombstonesReader, err := h.Tombstones() if err != nil { - return stats, errors.Wrap(err, "get tombstones") + return stats, fmt.Errorf("get tombstones: %w", err) } rec, err := encodeTombstonesToSnapshotRecord(tombstonesReader) if err != nil { - return stats, errors.Wrap(err, "encode tombstones") + return stats, fmt.Errorf("encode tombstones: %w", err) } recs = append(recs, rec) // Flush remaining series records and tombstones. if err := cp.Log(recs...); err != nil { - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf = buf[:0] @@ -1167,7 +1170,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { encbuf.PutByte(chunkSnapshotRecordTypeExemplars) enc.EncodeExemplarsIntoBuffer(batch, &encbuf) if err := cp.Log(encbuf.Get()); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } buf, batch = buf[:0], batch[:0] return nil @@ -1175,7 +1178,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { err = h.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error { if len(batch) >= maxExemplarsPerRecord { if err := flushExemplars(); err != nil { - return errors.Wrap(err, "flush exemplars") + return fmt.Errorf("flush exemplars: %w", err) } } @@ -1193,19 +1196,19 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { return nil }) if err != nil { - return stats, errors.Wrap(err, "iterate exemplars") + return stats, fmt.Errorf("iterate exemplars: %w", err) } // Flush remaining exemplars. 
if err := flushExemplars(); err != nil { - return stats, errors.Wrap(err, "flush exemplars at the end") + return stats, fmt.Errorf("flush exemplars at the end: %w", err) } if err := cp.Close(); err != nil { - return stats, errors.Wrap(err, "close chunk snapshot") + return stats, fmt.Errorf("close chunk snapshot: %w", err) } if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { - return stats, errors.Wrap(err, "rename chunk snapshot directory") + return stats, fmt.Errorf("rename chunk snapshot directory: %w", err) } if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, wlast, woffset); err != nil { @@ -1229,7 +1232,10 @@ func (h *Head) performChunkSnapshot() error { if err == nil { level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } - return errors.Wrap(err, "chunk snapshot") + if err != nil { + return fmt.Errorf("chunk snapshot: %w", err) + } + return nil } // ChunkSnapshotStats returns stats about a created chunk snapshot. @@ -1327,16 +1333,16 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error { func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSeries, error) { dir, snapIdx, snapOffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil { - if err == record.ErrNotFound { + if errors.Is(err, record.ErrNotFound) { return snapIdx, snapOffset, nil, nil } - return snapIdx, snapOffset, nil, errors.Wrap(err, "find last chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("find last chunk snapshot: %w", err) } start := time.Now() sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return snapIdx, snapOffset, nil, errors.Wrap(err, "open chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("open chunk snapshot: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -1424,7 +1430,7 @@ Outer: numSeries++ csr, err := decodeSeriesFromChunkSnapshot(&dec, rec) if err != nil { - loopErr = errors.Wrap(err, "decode series record") + loopErr = fmt.Errorf("decode series record: %w", err) break Outer } recordChan <- csr @@ -1432,7 +1438,7 @@ Outer: case chunkSnapshotRecordTypeTombstones: tr, err := decodeTombstonesSnapshotRecord(rec) if err != nil { - loopErr = errors.Wrap(err, "decode tombstones") + loopErr = fmt.Errorf("decode tombstones: %w", err) break Outer } @@ -1440,7 +1446,7 @@ Outer: h.tombstones.AddInterval(ref, ivs...) 
return nil }); err != nil { - loopErr = errors.Wrap(err, "iterate tombstones") + loopErr = fmt.Errorf("iterate tombstones: %w", err) break Outer } @@ -1468,7 +1474,7 @@ Outer: exemplarBuf = exemplarBuf[:0] exemplarBuf, err = dec.ExemplarsFromBuffer(&decbuf, exemplarBuf) if err != nil { - loopErr = errors.Wrap(err, "exemplars from buffer") + loopErr = fmt.Errorf("exemplars from buffer: %w", err) break Outer } @@ -1484,7 +1490,7 @@ Outer: Value: e.V, Ts: e.T, }); err != nil { - loopErr = errors.Wrap(err, "add exemplar") + loopErr = fmt.Errorf("add exemplar: %w", err) break Outer } } @@ -1502,16 +1508,19 @@ Outer: } close(errChan) - merr := tsdb_errors.NewMulti(errors.Wrap(loopErr, "decode loop")) + merr := tsdb_errors.NewMulti() + if loopErr != nil { + merr.Add(fmt.Errorf("decode loop: %w", loopErr)) + } for err := range errChan { - merr.Add(errors.Wrap(err, "record processing")) + merr.Add(fmt.Errorf("record processing: %w", err)) } if err := merr.Err(); err != nil { return -1, -1, nil, err } - if r.Err() != nil { - return -1, -1, nil, errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return -1, -1, nil, fmt.Errorf("read records: %w", err) } if len(refSeries) == 0 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 44ee66386f..c2ca581f7c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -110,6 +110,8 @@ type symbolCacheEntry struct { lastValue string } +type PostingsEncoder func(*encoding.Encbuf, []uint32) error + // Writer implements the IndexWriter interface for the standard // serialization format. type Writer struct { @@ -148,6 +150,8 @@ type Writer struct { crc32 hash.Hash Version int + + postingsEncoder PostingsEncoder } // TOC represents index Table Of Content that states where each section of index starts. @@ -186,7 +190,8 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { } // NewWriter returns a new Writer to the given filename. It serializes data in format version 2. -func NewWriter(ctx context.Context, fn string) (*Writer, error) { +// It uses the given encoder to encode each postings list. +func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncoder) (*Writer, error) { dir := filepath.Dir(fn) df, err := fileutil.OpenDir(dir) @@ -229,9 +234,10 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, - symbolCache: make(map[string]symbolCacheEntry, 1<<8), - labelNames: make(map[string]uint64, 1<<8), - crc32: newCRC32(), + symbolCache: make(map[string]symbolCacheEntry, 1<<8), + labelNames: make(map[string]uint64, 1<<8), + crc32: newCRC32(), + postingsEncoder: encoder, } if err := iw.writeMeta(); err != nil { return nil, err @@ -239,6 +245,12 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { return iw, nil } +// NewWriter creates a new index writer using the default encoder. See +// NewWriterWithEncoder. +func NewWriter(ctx context.Context, fn string) (*Writer, error) { + return NewWriterWithEncoder(ctx, fn, EncodePostingsRaw) +} + func (w *Writer) write(bufs ...[]byte) error { return w.f.Write(bufs...) } @@ -425,7 +437,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. 
if err := w.addPadding(16); err != nil { - return fmt.Errorf("failed to write padding bytes: %v", err) + return fmt.Errorf("failed to write padding bytes: %w", err) } if w.f.pos%16 != 0 { @@ -442,7 +454,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok { nameIndex, err = w.symbols.ReverseLookup(l.Name) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Name, err) } } w.labelNames[l.Name]++ @@ -452,7 +464,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok || cacheEntry.lastValue != l.Value { valueIndex, err = w.symbols.ReverseLookup(l.Value) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Value, err) } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, @@ -941,6 +953,20 @@ func (w *Writer) writePostingsToTmpFiles() error { return nil } +// EncodePostingsRaw uses the "basic" postings list encoding format with no compression: +// <BE uint32 len X><BE uint32 0><BE uint32 1>...<BE uint32 X-1>. +func EncodePostingsRaw(e *encoding.Encbuf, offs []uint32) error { + e.PutBE32int(len(offs)) + + for _, off := range offs { + if off > (1<<32)-1 { + return fmt.Errorf("series offset %d exceeds 4 bytes", off) + } + e.PutBE32(off) + } + return nil +} + func (w *Writer) writePosting(name, value string, offs []uint32) error { // Align beginning to 4 bytes for more efficient postings list scans. if err := w.fP.AddPadding(4); err != nil { @@ -959,13 +985,8 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { w.cntPO++ w.buf1.Reset() - w.buf1.PutBE32int(len(offs)) - - for _, off := range offs { - if off > (1<<32)-1 { - return fmt.Errorf("series offset %d exceeds 4 bytes", off) - } - w.buf1.PutBE32(off) + if err := w.postingsEncoder(&w.buf1, offs); err != nil { + return err } w.buf2.Reset() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index c839574276..222a8b0d6f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -18,11 +18,13 @@ import ( "context" "encoding/binary" "fmt" + "math" "runtime" "sort" "strings" "sync" + "github.com/bboreham/go-loser" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -525,7 +527,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators.
-func Merge(ctx context.Context, its ...Postings) Postings { +func Merge(_ context.Context, its ...Postings) Postings { if len(its) == 0 { return EmptyPostings() } @@ -533,122 +535,48 @@ func Merge(ctx context.Context, its ...Postings) Postings { return its[0] } - p, ok := newMergedPostings(ctx, its) + p, ok := newMergedPostings(its) if !ok { return EmptyPostings() } return p } -type postingsHeap []Postings - -func (h postingsHeap) Len() int { return len(h) } -func (h postingsHeap) Less(i, j int) bool { return h[i].At() < h[j].At() } -func (h *postingsHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] } - -func (h *postingsHeap) Push(x interface{}) { - *h = append(*h, x.(Postings)) -} - -func (h *postingsHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - type mergedPostings struct { - h postingsHeap - initialized bool - cur storage.SeriesRef - err error + p []Postings + h *loser.Tree[storage.SeriesRef, Postings] + cur storage.SeriesRef } -func newMergedPostings(ctx context.Context, p []Postings) (m *mergedPostings, nonEmpty bool) { - ph := make(postingsHeap, 0, len(p)) - - for _, it := range p { - // NOTE: mergedPostings struct requires the user to issue an initial Next. - switch { - case ctx.Err() != nil: - return &mergedPostings{err: ctx.Err()}, true - case it.Next(): - ph = append(ph, it) - case it.Err() != nil: - return &mergedPostings{err: it.Err()}, true - } - } - - if len(ph) == 0 { - return nil, false - } - return &mergedPostings{h: ph}, true +func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { + const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree. + lt := loser.New(p, maxVal) + return &mergedPostings{p: p, h: lt}, true } func (it *mergedPostings) Next() bool { - if it.h.Len() == 0 || it.err != nil { - return false - } - - // The user must issue an initial Next. - if !it.initialized { - heap.Init(&it.h) - it.cur = it.h[0].At() - it.initialized = true - return true - } - for { - cur := it.h[0] - if !cur.Next() { - heap.Pop(&it.h) - if cur.Err() != nil { - it.err = cur.Err() - return false - } - if it.h.Len() == 0 { - return false - } - } else { - // Value of top of heap has changed, re-heapify. - heap.Fix(&it.h, 0) + if !it.h.Next() { + return false } - - if it.h[0].At() != it.cur { - it.cur = it.h[0].At() + // Remove duplicate entries. + newItem := it.h.At() + if newItem != it.cur { + it.cur = newItem return true } } } func (it *mergedPostings) Seek(id storage.SeriesRef) bool { - if it.h.Len() == 0 || it.err != nil { - return false + for !it.h.IsEmpty() && it.h.At() < id { + finished := !it.h.Winner().Seek(id) + it.h.Fix(finished) } - if !it.initialized { - if !it.Next() { - return false - } - } - for it.cur < id { - cur := it.h[0] - if !cur.Seek(id) { - heap.Pop(&it.h) - if cur.Err() != nil { - it.err = cur.Err() - return false - } - if it.h.Len() == 0 { - return false - } - } else { - // Value of top of heap has changed, re-heapify. 
- heap.Fix(&it.h, 0) - } - - it.cur = it.h[0].At() + if it.h.IsEmpty() { + return false } + it.cur = it.h.At() return true } @@ -657,7 +585,12 @@ func (it mergedPostings) At() storage.SeriesRef { } func (it mergedPostings) Err() error { - return it.err + for _, p := range it.p { + if err := p.Err(); err != nil { + return err + } + } + return nil } // Without returns a new postings list that contains all elements from the full list that diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 6584d7da0a..f88e4415e4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -15,13 +15,13 @@ package tsdb import ( "context" + "errors" "fmt" "math" "strings" "unicode/utf8" "github.com/oklog/ulid" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" @@ -63,18 +63,18 @@ type blockBaseQuerier struct { func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) { indexr, err := b.Index() if err != nil { - return nil, errors.Wrap(err, "open index reader") + return nil, fmt.Errorf("open index reader: %w", err) } chunkr, err := b.Chunks() if err != nil { indexr.Close() - return nil, errors.Wrap(err, "open chunk reader") + return nil, fmt.Errorf("open chunk reader: %w", err) } tombsr, err := b.Tombstones() if err != nil { indexr.Close() chunkr.Close() - return nil, errors.Wrap(err, "open tombstone reader") + return nil, fmt.Errorf("open tombstone reader: %w", err) } if tombsr == nil { @@ -442,12 +442,12 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { p, err := PostingsForMatchers(ctx, r, matchers...) if err != nil { - return nil, errors.Wrap(err, "fetching postings for matchers") + return nil, fmt.Errorf("fetching postings for matchers: %w", err) } allValues, err := r.LabelValues(ctx, name) if err != nil { - return nil, errors.Wrapf(err, "fetching values of label %s", name) + return nil, fmt.Errorf("fetching values of label %s: %w", name, err) } // If we have a matcher for the label name, we can filter out values that don't match @@ -473,12 +473,12 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma for i, value := range allValues { valuesPostings[i], err = r.Postings(ctx, name, value) if err != nil { - return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value) + return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err) } } indexes, err := index.FindIntersectingPostings(p, valuesPostings) if err != nil { - return nil, errors.Wrap(err, "intersecting postings") + return nil, fmt.Errorf("intersecting postings: %w", err) } values := make([]string, 0, len(indexes)) @@ -499,8 +499,8 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab for p.Next() { postings = append(postings, p.At()) } - if p.Err() != nil { - return nil, errors.Wrapf(p.Err(), "postings for label names with matchers") + if err := p.Err(); err != nil { + return nil, fmt.Errorf("postings for label names with matchers: %w", err) } return r.LabelNamesFor(ctx, postings...) @@ -539,10 +539,10 @@ func (b *blockBaseSeriesSet) Next() bool { for b.p.Next() { if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil { // Postings may be stale. 
Skip if no underlying series exists. - if errors.Cause(err) == storage.ErrNotFound { + if errors.Is(err, storage.ErrNotFound) { continue } - b.err = errors.Wrapf(err, "get series %d", b.p.At()) + b.err = fmt.Errorf("get series %d: %w", b.p.At(), err) return false } @@ -552,7 +552,7 @@ func (b *blockBaseSeriesSet) Next() bool { intervals, err := b.tombstones.Get(b.p.At()) if err != nil { - b.err = errors.Wrap(err, "get tombstones") + b.err = fmt.Errorf("get tombstones: %w", err) return false } @@ -702,7 +702,7 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { } if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String()) + p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err) return false } @@ -900,7 +900,7 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { valueType := p.currDelIter.Next() if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) } return false } @@ -968,11 +968,11 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { } if err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } @@ -991,7 +991,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { firstValueType := p.currDelIter.Next() if firstValueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read") + p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err) return false } return false @@ -1075,11 +1075,11 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { } if err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err) return false } if err = p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err) return false } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 42a656dfe8..3931ad05d6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -20,9 +20,10 @@ import ( "fmt" "math" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -90,45 +91,45 @@ const ( Stateset MetricType = 7 ) -func GetMetricType(t textparse.MetricType) uint8 { +func GetMetricType(t model.MetricType) uint8 { switch t { - case textparse.MetricTypeCounter: + case 
model.MetricTypeCounter: return uint8(Counter) - case textparse.MetricTypeGauge: + case model.MetricTypeGauge: return uint8(Gauge) - case textparse.MetricTypeHistogram: + case model.MetricTypeHistogram: return uint8(HistogramSample) - case textparse.MetricTypeGaugeHistogram: + case model.MetricTypeGaugeHistogram: return uint8(GaugeHistogram) - case textparse.MetricTypeSummary: + case model.MetricTypeSummary: return uint8(Summary) - case textparse.MetricTypeInfo: + case model.MetricTypeInfo: return uint8(Info) - case textparse.MetricTypeStateset: + case model.MetricTypeStateset: return uint8(Stateset) default: return uint8(UnknownMT) } } -func ToTextparseMetricType(m uint8) textparse.MetricType { +func ToMetricType(m uint8) model.MetricType { switch m { case uint8(Counter): - return textparse.MetricTypeCounter + return model.MetricTypeCounter case uint8(Gauge): - return textparse.MetricTypeGauge + return model.MetricTypeGauge case uint8(HistogramSample): - return textparse.MetricTypeHistogram + return model.MetricTypeHistogram case uint8(GaugeHistogram): - return textparse.MetricTypeGaugeHistogram + return model.MetricTypeGaugeHistogram case uint8(Summary): - return textparse.MetricTypeSummary + return model.MetricTypeSummary case uint8(Info): - return textparse.MetricTypeInfo + return model.MetricTypeInfo case uint8(Stateset): - return textparse.MetricTypeStateset + return model.MetricTypeStateset default: - return textparse.MetricTypeUnknown + return model.MetricTypeUnknown } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 0811164541..9d2c5738d1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -22,7 +22,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -35,7 +34,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { // We must actually set the index file version to 2 and revert the meta.json version back to 1. dirs, err := blockDirs(dir) if err != nil { - return errors.Wrapf(err, "list block dirs in %q", dir) + return fmt.Errorf("list block dirs in %q: %w", dir, err) } tmpFiles := make([]string, 0, len(dirs)) @@ -71,44 +70,54 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { repl, err := os.Create(filepath.Join(d, "index.repaired")) if err != nil { - return errors.Wrapf(err, "create index.repaired for block dir: %v", d) + return fmt.Errorf("create index.repaired for block dir: %v: %w", d, err) } tmpFiles = append(tmpFiles, repl.Name()) broken, err := os.Open(filepath.Join(d, indexFilename)) if err != nil { - return errors.Wrapf(err, "open broken index for block dir: %v", d) + return fmt.Errorf("open broken index for block dir: %v: %w", d, err) } if _, err := io.Copy(repl, broken); err != nil { - return errors.Wrapf(err, "copy content of index to index.repaired for block dir: %v", d) + return fmt.Errorf("copy content of index to index.repaired for block dir: %v: %w", d, err) } // Set the 5th byte to 2 to indicate the correct file format version. 
if _, err := repl.WriteAt([]byte{2}, 4); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "rewrite of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Sync(); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "sync of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close repaired index for block dir: %v", d) + return fmt.Errorf("close repaired index for block dir: %v: %w", d, err) } if err := broken.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close broken index for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("close broken index for block dir: %v: %w", d, err) + } } if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil { - return errors.Wrapf(repl.Close(), "replaced broken index with index.repaired for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("replaced broken index with index.repaired for block dir: %v: %w", d, err) + } } // Reset version of meta.json to 1. meta.Version = metaVersion1 if _, err := writeMetaFile(logger, d, meta); err != nil { - return errors.Wrapf(repl.Close(), "write meta for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("write meta for block dir: %v: %w", d, err) + } } } return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go index 0847f81a8a..bb8d49b202 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -30,6 +30,14 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { return r } +func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram { + hs := GenerateTestHistograms(n) + for i := range hs { + hs[i].CounterResetHint = histogram.UnknownCounterReset + } + return hs +} + // GenerateTestHistogram but it is up to the user to set any known counter reset hint. 
func GenerateTestHistogram(i int) *histogram.Histogram { return &histogram.Histogram{ diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index bc7db35bf1..1509c9cd96 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -16,6 +16,7 @@ package tsdb import ( "bufio" "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -28,7 +29,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/storage" @@ -210,7 +210,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, for _, fn := range fns[i:] { if err := os.Remove(fn); err != nil { - return w, errors.Wrap(err, "removing segment failed") + return w, fmt.Errorf("removing segment failed: %w", err) } } break @@ -237,8 +237,8 @@ func (r *repairingWALReader) Read( if err == nil { return nil } - cerr, ok := errors.Cause(err).(walCorruptionErr) - if !ok { + var cerr *walCorruptionErr + if !errors.As(err, &cerr) { return err } r.wal.metrics.corruptions.Inc() @@ -309,7 +309,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Past WAL files are closed. We have to reopen them for another read. f, err := w.openSegmentFile(sf.Name()) if err != nil { - return errors.Wrap(err, "open old WAL segment for read") + return fmt.Errorf("open old WAL segment for read: %w", err) } candidates = append(candidates, &segmentFile{ File: f, @@ -326,7 +326,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Create a new tmp file. f, err := w.createSegmentFile(filepath.Join(w.dirFile.Name(), "compact.tmp")) if err != nil { - return errors.Wrap(err, "create compaction segment") + return fmt.Errorf("create compaction segment: %w", err) } defer func() { if err := os.RemoveAll(f.Name()); err != nil { @@ -352,7 +352,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) err := r.decodeSeries(flag, byt, &decSeries) if err != nil { - return errors.Wrap(err, "decode samples while truncating") + return fmt.Errorf("decode samples while truncating: %w", err) } for _, s := range decSeries { if keep(s.Ref) { @@ -367,11 +367,11 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "write to compaction segment") + return fmt.Errorf("write to compaction segment: %w", err) } } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read candidate WAL files") + if err := r.Err(); err != nil { + return fmt.Errorf("read candidate WAL files: %w", err) } off, err := csf.Seek(0, io.SeekCurrent) @@ -390,12 +390,12 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) _ = candidates[0].Close() // need close before remove on platform windows if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil { - return errors.Wrap(err, "rename compaction segment") + return fmt.Errorf("rename compaction segment: %w", err) } for _, f := range candidates[1:] { f.Close() // need close before remove on platform windows if err := os.RemoveAll(f.Name()); err != nil { - return errors.Wrap(err, "delete WAL segment file") + return fmt.Errorf("delete WAL segment file: %w", err) } } if err := w.dirFile.Sync(); err != nil { @@ -435,7 +435,7 @@ func (w *SegmentWAL) LogSeries(series []record.RefSeries) error { 
w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -462,7 +462,7 @@ func (w *SegmentWAL) LogSamples(samples []record.RefSample) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -488,7 +488,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -523,7 +523,7 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { switch n, err := f.Read(metab); { case err != nil: - return nil, errors.Wrapf(err, "validate meta %q", f.Name()) + return nil, fmt.Errorf("validate meta %q: %w", f.Name(), err) case n != 8: return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name()) } @@ -573,16 +573,16 @@ func (w *SegmentWAL) cut() error { w.actorc <- func() error { off, err := hf.Seek(0, io.SeekCurrent) if err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Truncate(off); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Sync(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Close(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } return nil } @@ -600,7 +600,10 @@ func (w *SegmentWAL) cut() error { go func() { w.actorc <- func() error { - return errors.Wrap(w.dirFile.Sync(), "sync WAL directory") + if err := w.dirFile.Sync(); err != nil { + return fmt.Errorf("sync WAL directory: %w", err) + } + return nil } }() @@ -635,7 +638,7 @@ func (w *SegmentWAL) Sync() error { head = w.head() }() if err != nil { - return errors.Wrap(err, "flush buffer") + return fmt.Errorf("flush buffer: %w", err) } if head != nil { // But only fsync the head segment after releasing the mutex as it will block on disk I/O. @@ -726,11 +729,13 @@ func (w *SegmentWAL) Close() error { // only the current segment will still be open. 
if hf := w.head(); hf != nil { if err := hf.Close(); err != nil { - return errors.Wrapf(err, "closing WAL head %s", hf.Name()) + return fmt.Errorf("closing WAL head %s: %w", hf.Name(), err) } } - - return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name()) + if err := w.dirFile.Close(); err != nil { + return fmt.Errorf("closing WAL dir %s: %w", w.dirFile.Name(), err) + } + return nil } func (w *SegmentWAL) write(t WALEntryType, flag uint8, buf []byte) error { @@ -921,7 +926,7 @@ func (r *walReader) Read( err = r.decodeSeries(flag, b, &series) if err != nil { - err = errors.Wrap(err, "decode series entry") + err = fmt.Errorf("decode series entry: %w", err) break } datac <- series @@ -940,7 +945,7 @@ func (r *walReader) Read( err = r.decodeSamples(flag, b, &samples) if err != nil { - err = errors.Wrap(err, "decode samples entry") + err = fmt.Errorf("decode samples entry: %w", err) break } datac <- samples @@ -960,7 +965,7 @@ func (r *walReader) Read( err = r.decodeDeletes(flag, b, &deletes) if err != nil { - err = errors.Wrap(err, "decode delete entry") + err = fmt.Errorf("decode delete entry: %w", err) break } datac <- deletes @@ -982,8 +987,8 @@ func (r *walReader) Read( if err != nil { return err } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read entry") + if err := r.Err(); err != nil { + return fmt.Errorf("read entry: %w", err) } return nil } @@ -1046,12 +1051,16 @@ type walCorruptionErr struct { lastOffset int64 } -func (e walCorruptionErr) Error() string { +func (e *walCorruptionErr) Error() string { return fmt.Sprintf("%s <file: %s, lastOffset: %d>", e.err, e.file, e.lastOffset) } +func (e *walCorruptionErr) Unwrap() error { + return e.err +} + func (r *walReader) corruptionErr(s string, args ...interface{}) error { - return walCorruptionErr{ + return &walCorruptionErr{ err: fmt.Errorf(s, args...), file: r.cur, lastOffset: r.lastOffset, @@ -1152,8 +1161,8 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) }) } - if dec.Err() != nil { - return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res)) + if err := dec.Err(); err != nil { + return fmt.Errorf("decode error after %d samples: %w", len(*res), err) } if len(dec.B) > 0 { return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } @@ -1185,7 +1194,7 @@ func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { // Detect whether we still have the old WAL. fns, err := sequenceFiles(dir) if err != nil && !os.IsNotExist(err) { - return false, errors.Wrap(err, "list sequence files") + return false, fmt.Errorf("list sequence files: %w", err) } if len(fns) == 0 { return false, nil // No WAL at all yet. } @@ -1194,13 +1203,13 @@ // old WAL. f, err := os.Open(fns[0]) if err != nil { - return false, errors.Wrap(err, "check first existing segment") + return false, fmt.Errorf("check first existing segment: %w", err) } defer f.Close() var hdr [4]byte - if _, err := f.Read(hdr[:]); err != nil && err != io.EOF { - return false, errors.Wrap(err, "read header from first segment") + if _, err := f.Read(hdr[:]); err != nil && !errors.Is(err, io.EOF) { + return false, fmt.Errorf("read header from first segment: %w", err) } // If we cannot read the magic header for segments of the old WAL, abort.
// Either it's migrated already or there's a corruption issue with which @@ -1223,11 +1232,11 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { tmpdir := dir + ".tmp" if err := os.RemoveAll(tmpdir); err != nil { - return errors.Wrap(err, "cleanup replacement dir") + return fmt.Errorf("cleanup replacement dir: %w", err) } repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone) if err != nil { - return errors.Wrap(err, "open new WAL") + return fmt.Errorf("open new WAL: %w", err) } // It should've already been closed as part of the previous finalization. @@ -1240,7 +1249,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { w, err := OpenSegmentWAL(dir, logger, time.Minute, nil) if err != nil { - return errors.Wrap(err, "open old WAL") + return fmt.Errorf("open old WAL: %w", err) } defer w.Close() @@ -1271,22 +1280,22 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { }, ) if decErr != nil { - return errors.Wrap(err, "decode old entries") + return fmt.Errorf("decode old entries: %w", err) } if err != nil { - return errors.Wrap(err, "write new entries") + return fmt.Errorf("write new entries: %w", err) } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := w.Close(); err != nil { - return errors.Wrap(err, "close old WAL") + return fmt.Errorf("close old WAL: %w", err) } if err := repl.Close(); err != nil { - return errors.Wrap(err, "close new WAL") + return fmt.Errorf("close new WAL: %w", err) } if err := fileutil.Replace(tmpdir, dir); err != nil { - return errors.Wrap(err, "replace old WAL") + return fmt.Errorf("replace old WAL: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index c9f8a4599f..1c76e38877 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -14,6 +14,7 @@ package wlog import ( + "errors" "fmt" "io" "math" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -264,7 +264,7 @@ func (w *Watcher) loop() { func (w *Watcher) Run() error { _, lastSegment, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "wal.Segments") + return fmt.Errorf("wal.Segments: %w", err) } // We want to ensure this is false across iterations since @@ -275,13 +275,13 @@ func (w *Watcher) Run() error { // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if err == nil { if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } } w.lastCheckpoint = lastCheckpoint @@ -371,7 +371,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { - if err != nil && errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) @@ -380,7 +380,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } return nil @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { var err error size, err = getSegmentSize(w.walDir, segmentNum) if err != nil { - return errors.Wrap(err, "getSegmentSize") + return fmt.Errorf("getSegmentSize: %w", err) } return w.readAndHandleError(reader, segmentNum, tail, size) @@ -447,7 +447,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { case <-segmentTicker.C: _, last, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "segments") + return fmt.Errorf("segments: %w", err) } // Check if new segments exists. @@ -459,7 +459,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore errors reading to end of segment whilst replaying the WAL. if !tail { switch { - case err != nil && errors.Cause(err) != io.EOF: + case err != nil && !errors.Is(err, io.EOF): level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) @@ -468,7 +468,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } @@ -497,8 +497,8 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { func (w *Watcher) garbageCollectSeries(segmentNum int) error { dir, _, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if dir == "" || dir == w.lastCheckpoint { @@ -508,7 +508,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { index, err := checkpointNum(dir) if err != nil { - return errors.Wrap(err, "error parsing checkpoint filename") + return fmt.Errorf("error parsing checkpoint filename: %w", err) } if index >= segmentNum { @@ -519,7 +519,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } // Clear series with a checkpoint or segment index # lower than the checkpoint we just read. 
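One behavioral difference drives the shape of several rewrites, including the readSegment and readSegmentForGC hunks that follow: errors.Wrap returned nil when handed a nil error, whereas fmt.Errorf("...: %w", err) always returns a non-nil error. That is why one-line return errors.Wrapf(r.Err(), ...) statements become explicit nil checks. A quick stand-alone demonstration (the names here are hypothetical):

package main

import "fmt"

// readErr stands in for r.Err() in the hunks below; here it reports no error.
func readErr() error { return nil }

func main() {
	// Unconditional wrapping manufactures an error out of nothing:
	inner := readErr()
	err := fmt.Errorf("segment %d: %w", 7, inner)
	fmt.Println(err == nil) // false, even though readErr() returned nil

	// The pattern the patch adopts: wrap only when an error is present.
	var out error
	if err := readErr(); err != nil {
		out = fmt.Errorf("segment %d: %w", 7, err)
	}
	fmt.Println(out == nil) // true
}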
@@ -658,7 +658,10 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } // Go through all series in a segment updating the segmentNum, so we can delete older series. @@ -691,7 +694,10 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } func (w *Watcher) SetStartTime(t time.Time) { @@ -706,29 +712,29 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { - return errors.Wrap(err, "checkpointNum") + return fmt.Errorf("checkpointNum: %w", err) } // Ensure we read the whole contents of every segment in the checkpoint dir. segs, err := w.segments(checkpointDir) if err != nil { - return errors.Wrap(err, "Unable to get segments checkpoint dir") + return fmt.Errorf("unable to get segments in checkpoint dir: %w", err) } for _, seg := range segs { size, err := getSegmentSize(checkpointDir, seg) if err != nil { - return errors.Wrap(err, "getSegmentSize") + return fmt.Errorf("getSegmentSize: %w", err) } sr, err := OpenReadSegment(SegmentName(checkpointDir, seg)) if err != nil { - return errors.Wrap(err, "unable to open segment") + return fmt.Errorf("unable to open segment: %w", err) } defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); errors.Cause(err) != io.EOF && err != nil { - return errors.Wrap(err, "readSegment") + if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("readSegment: %w", err) } if r.Offset() != size { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index c3ae001d98..fdea756945 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -116,6 +116,10 @@ func (e *CorruptionErr) Error() string { return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err) } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 671df78872..dd35d1fe99 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -41,7 +41,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -1114,7 +1114,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } // If no metric is specified, get the full list for the target. if metric == "" { - for _, md := range t.MetadataList() { + for _, md := range t.ListMetadata() { res = append(res, metricMetadata{ Target: t.Labels(), Metric: md.Metric, @@ -1126,7 +1126,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { continue } // Get metadata for the specified metric. - if md, ok := t.Metadata(metric); ok { + if md, ok := t.GetMetadata(metric); ok { res = append(res, metricMetadata{ Target: t.Labels(), Type: md.Type, @@ -1141,11 +1141,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } type metricMetadata struct { - Target labels.Labels `json:"target"` - Metric string `json:"metric,omitempty"` - Type textparse.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` + Target labels.Labels `json:"target"` + Metric string `json:"metric,omitempty"` + Type model.MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` } // AlertmanagerDiscovery has all the active Alertmanagers. 
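The metricMetadata change above swaps textparse.MetricType for model.MetricType from prometheus/common. Both are plain string types, so the JSON produced by the target metadata endpoints should be unchanged; only the Go type moves packages. A trimmed sketch (Target field omitted for brevity; assumes a prometheus/common version that ships model.MetricType and its constants, which this dependency bump pulls in):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    // Mirrors the updated struct shape; the field set is unchanged.
    type metricMetadata struct {
        Metric string           `json:"metric,omitempty"`
        Type   model.MetricType `json:"type"`
        Help   string           `json:"help"`
        Unit   string           `json:"unit"`
    }

    func main() {
        b, _ := json.Marshal(metricMetadata{Metric: "up", Type: model.MetricTypeGauge, Help: "1 if the target is up."})
        fmt.Println(string(b)) // {"metric":"up","type":"gauge","help":"1 if the target is up.","unit":""}
    }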
@@ -1221,14 +1221,8 @@ func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert { return apiAlerts } -type metadata struct { - Type textparse.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` -} - func (api *API) metricMetadata(r *http.Request) apiFuncResult { - metrics := map[string]map[metadata]struct{}{} + metrics := map[string]map[metadata.Metadata]struct{}{} limit := -1 if s := r.FormValue("limit"); s != "" { @@ -1249,8 +1243,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { if metric == "" { - for _, mm := range t.MetadataList() { - m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} + for _, mm := range t.ListMetadata() { + m := metadata.Metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} ms, ok := metrics[mm.Metric] if limitPerMetric > 0 && len(ms) >= limitPerMetric { @@ -1258,7 +1252,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } if !ok { - ms = map[metadata]struct{}{} + ms = map[metadata.Metadata]struct{}{} metrics[mm.Metric] = ms } ms[m] = struct{}{} @@ -1266,8 +1260,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { continue } - if md, ok := t.Metadata(metric); ok { - m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} + if md, ok := t.GetMetadata(metric); ok { + m := metadata.Metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} ms, ok := metrics[md.Metric] if limitPerMetric > 0 && len(ms) >= limitPerMetric { @@ -1275,7 +1269,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } if !ok { - ms = map[metadata]struct{}{} + ms = map[metadata.Metadata]struct{}{} metrics[md.Metric] = ms } ms[m] = struct{}{} @@ -1284,13 +1278,13 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } // Put the elements from the pseudo-set into a slice for marshaling. - res := map[string][]metadata{} + res := map[string][]metadata.Metadata{} for name, set := range metrics { if limit >= 0 && len(res) >= limit { break } - s := []metadata{} + s := []metadata.Metadata{} for metadata := range set { s = append(s, metadata) } diff --git a/vendor/github.com/thanos-io/promql-engine/engine/distributed.go b/vendor/github.com/thanos-io/promql-engine/engine/distributed.go new file mode 100644 index 0000000000..337b4e08ad --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/engine/distributed.go @@ -0,0 +1,89 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package engine + +import ( + "context" + "time" + + "github.com/thanos-io/promql-engine/api" + "github.com/thanos-io/promql-engine/logicalplan" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + v1 "github.com/prometheus/prometheus/web/api/v1" +) + +type remoteEngine struct { + q storage.Queryable + engine *compatibilityEngine + labelSets []labels.Labels + maxt int64 + mint int64 +} + +func NewRemoteEngine(opts Opts, q storage.Queryable, mint, maxt int64, labelSets []labels.Labels) *remoteEngine { + return &remoteEngine{ + q: q, + labelSets: labelSets, + maxt: maxt, + mint: mint, + engine: New(opts), + } +} + +func (l remoteEngine) MaxT() int64 { + return l.maxt +} + +func (l remoteEngine) MinT() int64 { + return l.mint +} + +func (l remoteEngine) LabelSets() []labels.Labels { + return l.labelSets +} + +func (l remoteEngine) NewRangeQuery(ctx context.Context, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { + return l.engine.NewRangeQuery(ctx, l.q, opts, qs, start, end, interval) +} + +type distributedEngine struct { + endpoints api.RemoteEndpoints + remoteEngine *compatibilityEngine +} + +func NewDistributedEngine(opts Opts, endpoints api.RemoteEndpoints) v1.QueryEngine { + opts.LogicalOptimizers = []logicalplan.Optimizer{ + logicalplan.PassthroughOptimizer{Endpoints: endpoints}, + logicalplan.DistributeAvgOptimizer{}, + logicalplan.DistributedExecutionOptimizer{Endpoints: endpoints}, + } + + return &distributedEngine{ + endpoints: endpoints, + remoteEngine: New(opts), + } +} + +func (l distributedEngine) SetQueryLogger(log promql.QueryLogger) {} + +func (l distributedEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + // Truncate milliseconds to avoid mismatch in timestamps between remote and local engines. + // Some clients might only support second precision when executing queries. + ts = ts.Truncate(time.Second) + + return l.remoteEngine.NewInstantQuery(ctx, q, opts, qs, ts) +} + +func (l distributedEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { + // Truncate milliseconds to avoid mismatch in timestamps between remote and local engines. + // Some clients might only support second precision when executing queries. 
+ start = start.Truncate(time.Second) + end = end.Truncate(time.Second) + interval = interval.Truncate(time.Second) + + return l.remoteEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) +} diff --git a/vendor/github.com/thanos-io/promql-engine/engine/engine.go b/vendor/github.com/thanos-io/promql-engine/engine/engine.go index 4b1d9a9107..dbfbdc8ac8 100644 --- a/vendor/github.com/thanos-io/promql-engine/engine/engine.go +++ b/vendor/github.com/thanos-io/promql-engine/engine/engine.go @@ -5,12 +5,9 @@ package engine import ( "context" - - "io" "math" "runtime" "sort" - "strconv" "time" "github.com/efficientgo/core/errors" @@ -18,14 +15,13 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" v1 "github.com/prometheus/prometheus/web/api/v1" - "github.com/thanos-io/promql-engine/api" "github.com/thanos-io/promql-engine/execution" "github.com/thanos-io/promql-engine/execution/function" "github.com/thanos-io/promql-engine/execution/model" @@ -61,11 +57,6 @@ type Opts struct { // in the new engine, instead of falling back to prometheus engine. DisableFallback bool - // DebugWriter specifies output for debug (multi-line) information meant for humans debugging the engine. - // If nil, nothing will be printed. - // NOTE: Users will not check the errors, debug writing is best effort. - DebugWriter io.Writer - // ExtLookbackDelta specifies what time range to use to determine valid previous sample for extended range functions. // Defaults to 1 hour if not specified. ExtLookbackDelta time.Duration @@ -74,10 +65,6 @@ type Opts struct { // This will default to false. EnableXFunctions bool - // EnableSubqueries enables the engine to handle subqueries without falling back to prometheus. - // This will default to false. 
- EnableSubqueries bool - // FallbackEngine Engine v1.QueryEngine @@ -100,78 +87,6 @@ func (o Opts) getLogicalOptimizers() []logicalplan.Optimizer { return optimizers } -type remoteEngine struct { - q storage.Queryable - engine *compatibilityEngine - labelSets []labels.Labels - maxt int64 - mint int64 -} - -func NewRemoteEngine(opts Opts, q storage.Queryable, mint, maxt int64, labelSets []labels.Labels) *remoteEngine { - return &remoteEngine{ - q: q, - labelSets: labelSets, - maxt: maxt, - mint: mint, - engine: New(opts), - } -} - -func (l remoteEngine) MaxT() int64 { - return l.maxt -} - -func (l remoteEngine) MinT() int64 { - return l.mint -} - -func (l remoteEngine) LabelSets() []labels.Labels { - return l.labelSets -} - -func (l remoteEngine) NewRangeQuery(ctx context.Context, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { - return l.engine.NewRangeQuery(ctx, l.q, opts, qs, start, end, interval) -} - -type distributedEngine struct { - endpoints api.RemoteEndpoints - remoteEngine *compatibilityEngine -} - -func NewDistributedEngine(opts Opts, endpoints api.RemoteEndpoints) v1.QueryEngine { - opts.LogicalOptimizers = []logicalplan.Optimizer{ - logicalplan.PassthroughOptimizer{Endpoints: endpoints}, - logicalplan.DistributeAvgOptimizer{}, - logicalplan.DistributedExecutionOptimizer{Endpoints: endpoints}, - } - - return &distributedEngine{ - endpoints: endpoints, - remoteEngine: New(opts), - } -} - -func (l distributedEngine) SetQueryLogger(log promql.QueryLogger) {} - -func (l distributedEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { - // Truncate milliseconds to avoid mismatch in timestamps between remote and local engines. - // Some clients might only support second precision when executing queries. - ts = ts.Truncate(time.Second) - - return l.remoteEngine.NewInstantQuery(ctx, q, opts, qs, ts) -} - -func (l distributedEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { - // Truncate milliseconds to avoid mismatch in timestamps between remote and local engines. - // Some clients might only support second precision when executing queries. - start = start.Truncate(time.Second) - end = end.Truncate(time.Second) - interval = interval.Truncate(time.Second) - - return l.remoteEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) -} - func New(opts Opts) *compatibilityEngine { if opts.Logger == nil { opts.Logger = log.NewNopLogger() @@ -231,7 +146,6 @@ func New(opts Opts) *compatibilityEngine { prom: engine, functions: functions, - debugWriter: opts.DebugWriter, disableFallback: opts.DisableFallback, logger: opts.Logger, lookbackDelta: opts.LookbackDelta, @@ -240,19 +154,23 @@ func New(opts Opts) *compatibilityEngine { metrics: metrics, extLookbackDelta: opts.ExtLookbackDelta, enableAnalysis: opts.EnableAnalysis, - enableSubqueries: opts.EnableSubqueries, noStepSubqueryIntervalFn: func(d time.Duration) time.Duration { return time.Duration(opts.NoStepSubqueryIntervalFn(d.Milliseconds()) * 1000000) }, } } +var ( + // Duplicate label checking logic uses a bitmap with 64 bits currently. + // As long as we use this method we need to have batches that are smaller + // then 64 steps. 
+ ErrStepsBatchTooLarge = errors.New("'StepsBatch' must be less than 64") +) + type compatibilityEngine struct { prom v1.QueryEngine functions map[string]*parser.Function - debugWriter io.Writer - disableFallback bool logger log.Logger lookbackDelta time.Duration @@ -262,7 +180,6 @@ type compatibilityEngine struct { extLookbackDelta time.Duration enableAnalysis bool - enableSubqueries bool noStepSubqueryIntervalFn func(time.Duration) time.Duration } @@ -297,11 +214,13 @@ func (e *compatibilityEngine) NewInstantQuery(ctx context.Context, q storage.Que LookbackDelta: opts.LookbackDelta(), ExtLookbackDelta: e.extLookbackDelta, EnableAnalysis: e.enableAnalysis, - EnableSubqueries: e.enableSubqueries, NoStepSubqueryIntervalFn: e.noStepSubqueryIntervalFn, } + if qOpts.StepsBatch > 64 { + return nil, ErrStepsBatchTooLarge + } - lplan := logicalplan.New(expr, qOpts).Optimize(e.logicalOptimizers) + lplan, warns := logicalplan.New(expr, qOpts).Optimize(e.logicalOptimizers) exec, err := execution.New(lplan.Expr(), q, qOpts) if e.triggerFallback(err) { e.metrics.queries.WithLabelValues("true").Inc() @@ -312,18 +231,14 @@ func (e *compatibilityEngine) NewInstantQuery(ctx context.Context, q storage.Que return nil, err } - if e.debugWriter != nil { - explain(e.debugWriter, exec, "", "") - } - return &compatibilityQuery{ - Query: &Query{exec: exec, opts: opts}, - engine: e, - expr: expr, - ts: ts, - t: InstantQuery, - resultSort: resultSort, - debugWriter: e.debugWriter, + Query: &Query{exec: exec, opts: opts}, + engine: e, + expr: expr, + ts: ts, + warns: warns, + t: InstantQuery, + resultSort: resultSort, }, nil } @@ -354,11 +269,13 @@ func (e *compatibilityEngine) NewRangeQuery(ctx context.Context, q storage.Query LookbackDelta: opts.LookbackDelta(), ExtLookbackDelta: e.extLookbackDelta, EnableAnalysis: e.enableAnalysis, - EnableSubqueries: false, // not yet implemented for range queries. 
NoStepSubqueryIntervalFn: e.noStepSubqueryIntervalFn, } + if qOpts.StepsBatch > 64 { + return nil, ErrStepsBatchTooLarge + } - lplan := logicalplan.New(expr, qOpts).Optimize(e.logicalOptimizers) + lplan, warns := logicalplan.New(expr, qOpts).Optimize(e.logicalOptimizers) exec, err := execution.New(lplan.Expr(), q, qOpts) if e.triggerFallback(err) { e.metrics.queries.WithLabelValues("true").Inc() @@ -369,38 +286,15 @@ func (e *compatibilityEngine) NewRangeQuery(ctx context.Context, q storage.Query return nil, err } - if e.debugWriter != nil { - explain(e.debugWriter, exec, "", "") - } - return &compatibilityQuery{ - Query: &Query{exec: exec, opts: opts}, - engine: e, - expr: expr, - t: RangeQuery, - debugWriter: e.debugWriter, + Query: &Query{exec: exec, opts: opts}, + engine: e, + expr: expr, + warns: warns, + t: RangeQuery, }, nil } -type ExplainableQuery interface { - promql.Query - - Explain() *ExplainOutputNode - Analyze() *AnalyzeOutputNode -} - -type AnalyzeOutputNode struct { - OperatorTelemetry model.OperatorTelemetry `json:"telemetry,omitempty"` - Children []AnalyzeOutputNode `json:"children,omitempty"` -} - -type ExplainOutputNode struct { - OperatorName string `json:"name,omitempty"` - Children []ExplainOutputNode `json:"children,omitempty"` -} - -var _ ExplainableQuery = &compatibilityQuery{} - type Query struct { exec model.VectorOperator opts promql.QueryOpts @@ -414,164 +308,37 @@ func (q *Query) Explain() *ExplainOutputNode { func (q *Query) Analyze() *AnalyzeOutputNode { if observableRoot, ok := q.exec.(model.ObservableVectorOperator); ok { - return analyzeVector(observableRoot) + return analyzeQuery(observableRoot) } return nil } -func analyzeVector(obsv model.ObservableVectorOperator) *AnalyzeOutputNode { - telemetry, obsVectors := obsv.Analyze() - - var children []AnalyzeOutputNode - for _, vector := range obsVectors { - children = append(children, *analyzeVector(vector)) - } - - return &AnalyzeOutputNode{ - OperatorTelemetry: telemetry, - Children: children, - } -} - -func explainVector(v model.VectorOperator) *ExplainOutputNode { - name, vectors := v.Explain() - - var children []ExplainOutputNode - for _, vector := range vectors { - children = append(children, *explainVector(vector)) - } - - return &ExplainOutputNode{ - OperatorName: name, - Children: children, - } -} - -type sortOrder bool - -const ( - sortOrderAsc sortOrder = false - sortOrderDesc sortOrder = true -) - -type resultSorter interface { - comparer(samples *promql.Vector) func(i, j int) bool -} - -type sortFuncResultSort struct { - sortOrder sortOrder -} - -type aggregateResultSort struct { - sortingLabels []string - groupBy bool - - sortOrder sortOrder -} - -type noSortResultSort struct { -} - -func newResultSort(expr parser.Expr) resultSorter { - switch texpr := expr.(type) { - case *parser.Call: - switch texpr.Func.Name { - case "sort": - return sortFuncResultSort{sortOrder: sortOrderAsc} - case "sort_desc": - return sortFuncResultSort{sortOrder: sortOrderDesc} - } - case *parser.AggregateExpr: - switch texpr.Op { - case parser.TOPK: - return aggregateResultSort{ - sortingLabels: texpr.Grouping, - sortOrder: sortOrderDesc, - groupBy: !texpr.Without, - } - case parser.BOTTOMK: - return aggregateResultSort{ - sortingLabels: texpr.Grouping, - sortOrder: sortOrderAsc, - groupBy: !texpr.Without, - } - } - } - return noSortResultSort{} -} -func (s noSortResultSort) comparer(samples *promql.Vector) func(i, j int) bool { - return func(i, j int) bool { return i < j } -} - -func valueCompare(order sortOrder, l, 
r float64) bool { - if math.IsNaN(r) { - return true - } - if order == sortOrderAsc { - return l < r - } - return l > r -} - -func (s sortFuncResultSort) comparer(samples *promql.Vector) func(i, j int) bool { - return func(i, j int) bool { - return valueCompare(s.sortOrder, (*samples)[i].F, (*samples)[j].F) - } -} - -func (s aggregateResultSort) comparer(samples *promql.Vector) func(i, j int) bool { - return func(i int, j int) bool { - var iLbls labels.Labels - var jLbls labels.Labels - iLb := labels.NewBuilder((*samples)[i].Metric) - jLb := labels.NewBuilder((*samples)[j].Metric) - if s.groupBy { - iLbls = iLb.Keep(s.sortingLabels...).Labels() - jLbls = jLb.Keep(s.sortingLabels...).Labels() - } else { - iLbls = iLb.Del(s.sortingLabels...).Labels() - jLbls = jLb.Del(s.sortingLabels...).Labels() - } - - lblsCmp := labels.Compare(iLbls, jLbls) - if lblsCmp != 0 { - return lblsCmp < 0 - } - return valueCompare(s.sortOrder, (*samples)[i].F, (*samples)[j].F) - } -} - type compatibilityQuery struct { *Query - engine *compatibilityEngine - expr parser.Expr - ts time.Time // Empty for range queries. + engine *compatibilityEngine + expr parser.Expr + ts time.Time // Empty for range queries. + warns annotations.Annotations + t QueryType resultSort resultSorter - - cancel context.CancelFunc - - debugWriter io.Writer + cancel context.CancelFunc } func (q *compatibilityQuery) Exec(ctx context.Context) (ret *promql.Result) { ctx = warnings.NewContext(ctx) defer func() { - if warns := warnings.FromContext(ctx); len(warns) > 0 { - ret.Warnings = warns - } + ret.Warnings = ret.Warnings.Merge(warnings.FromContext(ctx)) }() // Handle case with strings early on as this does not need us to process samples. switch e := q.expr.(type) { case *parser.StringLiteral: - if q.debugWriter != nil { - analyze(q.debugWriter, q.exec.(model.ObservableVectorOperator), " ", "") - } return &promql.Result{Value: promql.String{V: e.Val, T: q.ts.UnixMilli()}} } ret = &promql.Result{ - Value: promql.Vector{}, + Value: promql.Vector{}, + Warnings: q.warns, } defer recoverEngine(q.engine.logger, q.expr, &ret.Err) @@ -588,8 +355,8 @@ func (q *compatibilityQuery) Exec(ctx context.Context) (ret *promql.Result) { } series := make([]promql.Series, len(resultSeries)) - for i := 0; i < len(resultSeries); i++ { - series[i].Metric = resultSeries[i] + for i, s := range resultSeries { + series[i].Metric = s } loop: for { @@ -638,32 +405,25 @@ loop: // For range Query we expect always a Matrix value type. if q.t == RangeQuery { - resultMatrix := make(promql.Matrix, 0, len(series)) + matrix := make(promql.Matrix, 0, len(series)) for _, s := range series { if len(s.Floats)+len(s.Histograms) == 0 { continue } - resultMatrix = append(resultMatrix, s) + matrix = append(matrix, s) } - sort.Sort(resultMatrix) - if resultMatrix.ContainsSameLabelset() { + sort.Sort(matrix) + if matrix.ContainsSameLabelset() { return newErrResult(ret, extlabels.ErrDuplicateLabelSet) } - ret.Value = resultMatrix - if q.debugWriter != nil { - analyze(q.debugWriter, q.exec.(model.ObservableVectorOperator), "", "") - } + ret.Value = matrix return ret } var result parser.Value switch q.expr.Type() { case parser.ValueTypeMatrix: - matrix := promql.Matrix(series) - if matrix.ContainsSameLabelset() { - return newErrResult(ret, extlabels.ErrDuplicateLabelSet) - } - result = matrix + result = promql.Matrix(series) case parser.ValueTypeVector: // Convert matrix with one value per series into vector. 
vector := make(promql.Vector, 0, len(resultSeries)) @@ -688,9 +448,6 @@ loop: } } sort.Slice(vector, q.resultSort.comparer(&vector)) - if vector.ContainsSameLabelset() { - return newErrResult(ret, extlabels.ErrDuplicateLabelSet) - } result = vector case parser.ValueTypeScalar: v := math.NaN() @@ -762,48 +519,3 @@ func recoverEngine(logger log.Logger, expr parser.Expr, errp *error) { *errp = errors.Wrap(err, "unexpected error") } } - -func analyze(w io.Writer, o model.ObservableVectorOperator, indent, indentNext string) { - telemetry, next := o.Analyze() - _, _ = w.Write([]byte(indent)) - _, _ = w.Write([]byte("Operator Time :")) - _, _ = w.Write([]byte(strconv.FormatInt(int64(telemetry.ExecutionTimeTaken()), 10))) - if len(next) == 0 { - _, _ = w.Write([]byte("\n")) - return - } - _, _ = w.Write([]byte(":\n")) - - for i, n := range next { - if i == len(next)-1 { - analyze(w, n, indentNext+"└──", indentNext+" ") - } else { - analyze(w, n, indentNext+"└──", indentNext+" ") - } - } -} - -func explain(w io.Writer, o model.VectorOperator, indent, indentNext string) { - me, next := o.Explain() - _, _ = w.Write([]byte(indent)) - _, _ = w.Write([]byte(me)) - if len(next) == 0 { - _, _ = w.Write([]byte("\n")) - return - } - - if me == "[*CancellableOperator]" { - _, _ = w.Write([]byte(": ")) - explain(w, next[0], "", indentNext) - return - } - _, _ = w.Write([]byte(":\n")) - - for i, n := range next { - if i == len(next)-1 { - explain(w, n, indentNext+"└──", indentNext+" ") - } else { - explain(w, n, indentNext+"├──", indentNext+"│ ") - } - } -} diff --git a/vendor/github.com/thanos-io/promql-engine/engine/explain.go b/vendor/github.com/thanos-io/promql-engine/engine/explain.go new file mode 100644 index 0000000000..61630b9b2b --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/engine/explain.go @@ -0,0 +1,58 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package engine + +import ( + "github.com/prometheus/prometheus/promql" + + "github.com/thanos-io/promql-engine/execution/model" +) + +type ExplainableQuery interface { + promql.Query + + Explain() *ExplainOutputNode + Analyze() *AnalyzeOutputNode +} + +type AnalyzeOutputNode struct { + OperatorTelemetry model.OperatorTelemetry `json:"telemetry,omitempty"` + Children []AnalyzeOutputNode `json:"children,omitempty"` +} + +type ExplainOutputNode struct { + OperatorName string `json:"name,omitempty"` + Children []ExplainOutputNode `json:"children,omitempty"` +} + +var _ ExplainableQuery = &compatibilityQuery{} + +func analyzeQuery(obsv model.ObservableVectorOperator) *AnalyzeOutputNode { + _, children := obsv.Explain() + var childTelemetry []AnalyzeOutputNode + for _, child := range children { + if obsChild, ok := child.(model.ObservableVectorOperator); ok { + childTelemetry = append(childTelemetry, *analyzeQuery(obsChild)) + } + } + + return &AnalyzeOutputNode{ + OperatorTelemetry: obsv, + Children: childTelemetry, + } +} + +func explainVector(v model.VectorOperator) *ExplainOutputNode { + name, vectors := v.Explain() + + var children []ExplainOutputNode + for _, vector := range vectors { + children = append(children, *explainVector(vector)) + } + + return &ExplainOutputNode{ + OperatorName: name, + Children: children, + } +} diff --git a/vendor/github.com/thanos-io/promql-engine/engine/sort.go b/vendor/github.com/thanos-io/promql-engine/engine/sort.go new file mode 100644 index 0000000000..130c7e9ce8 --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/engine/sort.go @@ -0,0 +1,106 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package engine + +import ( + "math" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" +) + +type sortOrder bool + +const ( + sortOrderAsc sortOrder = false + sortOrderDesc sortOrder = true +) + +type resultSorter interface { + comparer(samples *promql.Vector) func(i, j int) bool +} + +type sortFuncResultSort struct { + sortOrder sortOrder +} + +type aggregateResultSort struct { + sortingLabels []string + groupBy bool + + sortOrder sortOrder +} + +type noSortResultSort struct { +} + +func newResultSort(expr parser.Expr) resultSorter { + switch texpr := expr.(type) { + case *parser.Call: + switch texpr.Func.Name { + case "sort": + return sortFuncResultSort{sortOrder: sortOrderAsc} + case "sort_desc": + return sortFuncResultSort{sortOrder: sortOrderDesc} + } + case *parser.AggregateExpr: + switch texpr.Op { + case parser.TOPK: + return aggregateResultSort{ + sortingLabels: texpr.Grouping, + sortOrder: sortOrderDesc, + groupBy: !texpr.Without, + } + case parser.BOTTOMK: + return aggregateResultSort{ + sortingLabels: texpr.Grouping, + sortOrder: sortOrderAsc, + groupBy: !texpr.Without, + } + } + } + return noSortResultSort{} +} +func (s noSortResultSort) comparer(samples *promql.Vector) func(i, j int) bool { + return func(i, j int) bool { return i < j } +} + +func valueCompare(order sortOrder, l, r float64) bool { + if math.IsNaN(r) { + return true + } + if order == sortOrderAsc { + return l < r + } + return l > r +} + +func (s sortFuncResultSort) comparer(samples *promql.Vector) func(i, j int) bool { + return func(i, j int) bool { + return valueCompare(s.sortOrder, (*samples)[i].F, (*samples)[j].F) + } +} + +func (s aggregateResultSort) comparer(samples *promql.Vector) func(i, j int) bool { + return func(i int, j 
int) bool { + var iLbls labels.Labels + var jLbls labels.Labels + iLb := labels.NewBuilder((*samples)[i].Metric) + jLb := labels.NewBuilder((*samples)[j].Metric) + if s.groupBy { + iLbls = iLb.Keep(s.sortingLabels...).Labels() + jLbls = jLb.Keep(s.sortingLabels...).Labels() + } else { + iLbls = iLb.Del(s.sortingLabels...).Labels() + jLbls = jLb.Del(s.sortingLabels...).Labels() + } + + lblsCmp := labels.Compare(iLbls, jLbls) + if lblsCmp != 0 { + return lblsCmp < 0 + } + return valueCompare(s.sortOrder, (*samples)[i].F, (*samples)[j].F) + } +} diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/accumulator.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/accumulator.go index fa808359af..c9f3a73383 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/accumulator.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/accumulator.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + package aggregate import ( @@ -78,106 +81,129 @@ func (s *sumAcc) Reset(_ float64) { s.value = 0 } -type genericAcc struct { - zeroVal float64 +func newMaxAcc() *maxAcc { + return &maxAcc{} +} + +type maxAcc struct { value float64 hasValue bool - skipNaNs bool - - aggregate func(float64, float64) float64 - vectorAggregate func([]float64, []*histogram.FloatHistogram) float64 } -func maxAggregate(a, b float64) float64 { - if a > b { - return a +func (c *maxAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) { + if len(vs) == 0 { + return } - return b -} -func maxVecAggregate(fs []float64, _ []*histogram.FloatHistogram) float64 { - return floats.Max(fs) + fst, rem := vs[0], vs[1:] + c.Add(fst, nil) + if len(rem) == 0 { + return + } + c.Add(floats.Max(rem), nil) } -func minAggregate(a, b float64) float64 { - if a < b { - return a +func (c *maxAcc) Add(v float64, h *histogram.FloatHistogram) { + if !c.hasValue { + c.value = v + c.hasValue = true + return + } + if c.value < v || math.IsNaN(c.value) { + c.value = v } - return b } -func minVecAggregate(fs []float64, _ []*histogram.FloatHistogram) float64 { - return floats.Min(fs) + +func (c *maxAcc) Value() (float64, *histogram.FloatHistogram) { + return c.value, nil } -func groupAggregate(_, _ float64) float64 { return 1 } -func groupVecAggregate(_ []float64, _ []*histogram.FloatHistogram) float64 { - return 1 +func (c *maxAcc) HasValue() bool { + return c.hasValue } -func newMaxAcc() *genericAcc { - return &genericAcc{ - skipNaNs: true, - zeroVal: math.MinInt64, - aggregate: maxAggregate, - vectorAggregate: maxVecAggregate, - } +func (c *maxAcc) Reset(_ float64) { + c.hasValue = false + c.value = 0 } -func newMinAcc() *genericAcc { - return &genericAcc{ - skipNaNs: true, - zeroVal: math.MaxInt64, - aggregate: minAggregate, - vectorAggregate: minVecAggregate, - } +func newMinAcc() *minAcc { + return &minAcc{} } -func newGroupAcc() *genericAcc { - return &genericAcc{ - zeroVal: 1, - aggregate: groupAggregate, - vectorAggregate: groupVecAggregate, - } +type minAcc struct { + value float64 + hasValue bool } -func (g *genericAcc) Add(v float64, _ *histogram.FloatHistogram) { - if g.skipNaNs && math.IsNaN(v) { +func (c *minAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) { + if len(vs) == 0 { return } - if !g.hasValue { - g.value = g.aggregate(g.zeroVal, v) - g.hasValue = true + fst, rem := vs[0], vs[1:] + c.Add(fst, nil) + if len(rem) == 0 { return } - g.hasValue = true - g.value = g.aggregate(g.value, v) + 
c.Add(floats.Min(rem), nil) } -func (g *genericAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) { - if len(vs) == 0 && len(hs) == 0 { +func (c *minAcc) Add(v float64, h *histogram.FloatHistogram) { + if !c.hasValue { + c.value = v + c.hasValue = true return } + if c.value > v || math.IsNaN(c.value) { + c.value = v + } +} - if !g.hasValue || math.IsNaN(g.value) { - g.value = g.vectorAggregate(vs, hs) - g.hasValue = true +func (c *minAcc) Value() (float64, *histogram.FloatHistogram) { + return c.value, nil +} + +func (c *minAcc) HasValue() bool { + return c.hasValue +} + +func (c *minAcc) Reset(_ float64) { + c.hasValue = false + c.value = 0 +} + +func newGroupAcc() *groupAcc { + return &groupAcc{} +} + +type groupAcc struct { + value float64 + hasValue bool +} + +func (c *groupAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) { + if len(vs) == 0 && len(hs) == 0 { return } - current := g.value - g.value = g.aggregate(current, g.vectorAggregate(vs, hs)) - g.hasValue = true + c.hasValue = true + c.value = 1 +} + +func (c *groupAcc) Add(v float64, h *histogram.FloatHistogram) { + c.hasValue = true + c.value = 1 } -func (g *genericAcc) Value() (float64, *histogram.FloatHistogram) { - return g.value, nil +func (c *groupAcc) Value() (float64, *histogram.FloatHistogram) { + return c.value, nil } -func (g *genericAcc) HasValue() bool { - return g.hasValue +func (c *groupAcc) HasValue() bool { + return c.hasValue } -func (g *genericAcc) Reset(_ float64) { - g.hasValue = false - g.value = 0 +func (c *groupAcc) Reset(_ float64) { + c.hasValue = false + c.value = 0 } type countAcc struct { @@ -346,7 +372,7 @@ func (q *quantileAcc) Add(v float64, h *histogram.FloatHistogram) { } func (q *quantileAcc) Value() (float64, *histogram.FloatHistogram) { - return quantile(q.arg, q.points), nil + return Quantile(q.arg, q.points), nil } func (q *quantileAcc) HasValue() bool { diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go index 4f25e8bd56..89627e08a5 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go @@ -11,9 +11,9 @@ import ( "time" "github.com/efficientgo/core/errors" - "github.com/prometheus/prometheus/model/labels" "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" @@ -59,8 +59,8 @@ func NewHashAggregate( // Grouping labels need to be sorted in order for metric hashing to work. 
// https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181 slices.Sort(labels) - a := &aggregate{ - OperatorTelemetry: &model.TrackedTelemetry{}, + return &aggregate{ + OperatorTelemetry: model.NewTelemetry("[aggregate]", opts.EnableAnalysis), next: next, paramOp: paramOp, @@ -70,21 +70,7 @@ func NewHashAggregate( aggregation: aggregation, labels: labels, stepsBatch: opts.StepsBatch, - } - - return a, nil -} - -func (a *aggregate) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - a.SetName("[*aggregate]") - var ops []model.ObservableVectorOperator - if obsnextParamOp, ok := a.paramOp.(model.ObservableVectorOperator); ok { - ops = append(ops, obsnextParamOp) - } - if obsnext, ok := a.next.(model.ObservableVectorOperator); ok { - ops = append(ops, obsnext) - } - return a, ops + }, nil } func (a *aggregate) Explain() (me string, next []model.VectorOperator) { @@ -94,12 +80,15 @@ func (a *aggregate) Explain() (me string, next []model.VectorOperator) { } ops = append(ops, a.next) if a.by { - return fmt.Sprintf("[*aggregate] %v by (%v)", a.aggregation.String(), a.labels), ops + return fmt.Sprintf("[aggregate] %v by (%v)", a.aggregation.String(), a.labels), ops } - return fmt.Sprintf("[*aggregate] %v without (%v)", a.aggregation.String(), a.labels), ops + return fmt.Sprintf("[aggregate] %v without (%v)", a.aggregation.String(), a.labels), ops } func (a *aggregate) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { a.AddExecutionTimeTaken(time.Since(start)) }() + var err error a.once.Do(func() { err = a.initializeTables(ctx) }) if err != nil { @@ -114,13 +103,14 @@ func (a *aggregate) GetPool() *model.VectorPool { } func (a *aggregate) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { a.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() - defer func() { a.AddExecutionTimeTaken(time.Since(start)) }() var err error a.once.Do(func() { err = a.initializeTables(ctx) }) @@ -247,7 +237,7 @@ func (a *aggregate) initializeScalarTables(ctx context.Context) ([]aggregateTabl labelsMap[lblName] = struct{}{} } for i := 0; i < len(series); i++ { - hash, _, lbls := hashMetric(builder, series[i], !a.by, a.labels, labelsMap, hashingBuf) + hash, lbls := hashMetric(builder, series[i], !a.by, a.labels, labelsMap, hashingBuf) output, ok := outputMap[hash] if !ok { output = &model.Series{ diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go index 62cfe40a3a..6b63edd50d 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go @@ -23,6 +23,8 @@ import ( ) type kAggregate struct { + model.OperatorTelemetry + next model.VectorOperator paramOp model.VectorOperator // params holds the aggregate parameter for each step. 
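The telemetry rework repeated across these operator diffs follows a single pattern: the operator embeds model.OperatorTelemetry, the constructor builds it once with model.NewTelemetry(name, opts.EnableAnalysis), the per-operator Analyze methods go away, and each Next/Series call records its own wall time through a deferred AddExecutionTimeTaken. A stripped-down sketch of that pattern with stand-in types (not the engine's real interfaces):

    package main

    import (
        "fmt"
        "time"
    )

    // Stand-in for model.OperatorTelemetry.
    type OperatorTelemetry struct {
        name  string
        taken time.Duration
    }

    func (t *OperatorTelemetry) AddExecutionTimeTaken(d time.Duration) { t.taken += d }

    type aggregate struct {
        *OperatorTelemetry // embedded, as in the patch
    }

    func (a *aggregate) Next() error {
        start := time.Now()
        defer func() { a.AddExecutionTimeTaken(time.Since(start)) }()
        // ... operator work happens here ...
        return nil
    }

    func main() {
        a := &aggregate{OperatorTelemetry: &OperatorTelemetry{name: "[aggregate]"}}
        _ = a.Next()
        fmt.Println(a.name, a.taken) // operator name and its accumulated wall time
    }

Note that the hunks below also hoist the start/defer pair above the ctx.Done() select, so cancelled calls are included in the recorded time rather than skipped.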
@@ -39,7 +41,6 @@ type kAggregate struct { inputToHeap []*samplesHeap heaps []*samplesHeap compare func(float64, float64) bool - model.OperatorTelemetry } func NewKHashAggregate( @@ -66,29 +67,34 @@ func NewKHashAggregate( // https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181 slices.Sort(labels) - a := &kAggregate{ - next: next, - vectorPool: points, - by: by, - aggregation: aggregation, - labels: labels, - paramOp: paramOp, - compare: compare, - params: make([]float64, opts.StepsBatch), - } - a.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - a.OperatorTelemetry = &model.TrackedTelemetry{} - } - return a, nil + return &kAggregate{ + OperatorTelemetry: model.NewTelemetry("[kaggregate]", opts.EnableAnalysis), + next: next, + vectorPool: points, + by: by, + aggregation: aggregation, + labels: labels, + paramOp: paramOp, + compare: compare, + params: make([]float64, opts.StepsBatch), + }, nil } func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { a.AddExecutionTimeTaken(time.Since(start)) }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + in, err := a.next.Next(ctx) if err != nil { return nil, err } - start := time.Now() + args, err := a.paramOp.Next(ctx) if err != nil { return nil, err @@ -128,12 +134,14 @@ func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) { a.next.GetPool().PutStepVector(vector) } a.next.GetPool().PutVectors(in) - a.AddExecutionTimeTaken(time.Since(start)) return result, nil } func (a *kAggregate) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { a.AddExecutionTimeTaken(time.Since(start)) }() + var err error a.once.Do(func() { err = a.init(ctx) }) if err != nil { @@ -147,23 +155,11 @@ func (a *kAggregate) GetPool() *model.VectorPool { return a.vectorPool } -func (a *kAggregate) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - a.SetName("[*kaggregate]") - next := make([]model.ObservableVectorOperator, 0, 2) - if obsnextParamOp, ok := a.paramOp.(model.ObservableVectorOperator); ok { - next = append(next, obsnextParamOp) - } - if obsnext, ok := a.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - return a, next -} - func (a *kAggregate) Explain() (me string, next []model.VectorOperator) { if a.by { - return fmt.Sprintf("[*kaggregate] %v by (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} + return fmt.Sprintf("[kaggregate] %v by (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} } - return fmt.Sprintf("[*kaggregate] %v without (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} + return fmt.Sprintf("[kaggregate] %v without (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} } func (a *kAggregate) init(ctx context.Context) error { @@ -184,7 +180,7 @@ func (a *kAggregate) init(ctx context.Context) error { labelsMap[lblName] = struct{}{} } for i := 0; i < len(series); i++ { - hash, _, _ := hashMetric(builder, series[i], !a.by, a.labels, labelsMap, hashingBuf) + hash, _ := hashMetric(builder, series[i], !a.by, a.labels, labelsMap, hashingBuf) h, ok := heapsHash[hash] if !ok { h = &samplesHeap{compare: a.compare} @@ -198,21 +194,20 @@ func (a *kAggregate) init(ctx context.Context) error { return nil } -func (a *kAggregate) aggregate(t int64, result 
*[]model.StepVector, k int, SampleIDs []uint64, samples []float64) { - for i, sId := range SampleIDs { +func (a *kAggregate) aggregate(t int64, result *[]model.StepVector, k int, sampleIDs []uint64, samples []float64) { + for i, sId := range sampleIDs { h := a.inputToHeap[sId] - if h.Len() < k || h.compare(h.entries[0].total, samples[i]) || (math.IsNaN(h.entries[0].total) && !math.IsNaN(samples[i])) { - if k == 1 && h.Len() == 1 { - h.entries[0].sId = sId - h.entries[0].total = samples[i] - continue - } + switch { + case h.Len() < k: + heap.Push(h, &entry{sId: sId, total: samples[i]}) - if h.Len() == k { - heap.Pop(h) - } + case h.compare(h.entries[0].total, samples[i]) || (math.IsNaN(h.entries[0].total) && !math.IsNaN(samples[i])): + h.entries[0].sId = sId + h.entries[0].total = samples[i] - heap.Push(h, &entry{sId: sId, total: samples[i]}) + if k > 1 { + heap.Fix(h, 0) + } } } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go index 232514dbc4..d362ab83dc 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go @@ -128,7 +128,7 @@ func hashMetric( grouping []string, groupingSet map[string]struct{}, buf []byte, -) (uint64, string, labels.Labels) { +) (uint64, labels.Labels) { buf = buf[:0] builder.Reset() @@ -142,12 +142,12 @@ func hashMetric( } builder.Add(lbl.Name, lbl.Value) }) - key, bytes := metric.HashWithoutLabels(buf, grouping...) - return key, string(bytes), builder.Labels() + key, _ := metric.HashWithoutLabels(buf, grouping...) + return key, builder.Labels() } if len(grouping) == 0 { - return 0, "", labels.Labels{} + return 0, labels.Labels{} } metric.Range(func(lbl labels.Label) { @@ -156,8 +156,8 @@ func hashMetric( } builder.Add(lbl.Name, lbl.Value) }) - key, bytes := metric.HashForLabels(buf, grouping...) - return key, string(bytes), builder.Labels() + key, _ := metric.HashForLabels(buf, grouping...) + return key, builder.Labels() } func newScalarAccumulator(expr parser.ItemType) (accumulator, error) { @@ -186,7 +186,7 @@ func newScalarAccumulator(expr parser.ItemType) (accumulator, error) { return nil, errors.Wrap(parse.ErrNotSupportedExpr, msg) } -func quantile(q float64, points []float64) float64 { +func Quantile(q float64, points []float64) float64 { if len(points) == 0 || math.IsNaN(q) { return math.NaN() } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go b/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go index cb8cc21c05..7b5caf5818 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go @@ -28,6 +28,8 @@ const ( // scalarOperator evaluates expressions where one operand is a scalarOperator. type scalarOperator struct { + model.OperatorTelemetry + seriesOnce sync.Once series []labels.Labels @@ -45,7 +47,6 @@ type scalarOperator struct { // Keep the result if both sides are scalars. 
bothScalars bool - model.OperatorTelemetry } func NewScalar( @@ -70,7 +71,9 @@ func NewScalar( operandValIdx = 1 } - o := &scalarOperator{ + return &scalarOperator{ + OperatorTelemetry: model.NewTelemetry("[vectorScalarBinary]", opts.EnableAnalysis), + pool: pool, next: next, scalar: scalar, @@ -81,32 +84,18 @@ func NewScalar( operandValIdx: operandValIdx, returnBool: returnBool, bothScalars: scalarSide == ScalarSideBoth, - } - o.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - o.OperatorTelemetry = &model.TrackedTelemetry{} - } - return o, nil + }, nil } -func (o *scalarOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*scalarOperator]") - next := make([]model.ObservableVectorOperator, 0, 2) - if obsnext, ok := o.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - if obsnextScalar, ok := o.scalar.(model.ObservableVectorOperator); ok { - next = append(next, obsnextScalar) - } - return o, next -} - func (o *scalarOperator) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*scalarOperator] %s", parser.ItemTypeStr[o.opType]), []model.VectorOperator{o.next, o.scalar} + return fmt.Sprintf("[vectorScalarBinary] %s", parser.ItemTypeStr[o.opType]), []model.VectorOperator{o.next, o.scalar} } func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + var err error o.seriesOnce.Do(func() { err = o.loadSeries(ctx) }) if err != nil { @@ -116,12 +105,14 @@ func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) { } func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() in, err := o.next.Next(ctx) if err != nil { @@ -181,7 +172,6 @@ func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) { o.next.GetPool().PutVectors(in) o.scalar.GetPool().PutVectors(scalarIn) - o.AddExecutionTimeTaken(time.Since(start)) return out, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go b/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go index 22e907d368..f8ae8a6208 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go @@ -8,12 +8,12 @@ import ( "fmt" "math" "sync" - - "golang.org/x/exp/slices" + "time" "github.com/cespare/xxhash/v2" "github.com/efficientgo/core/errors" "github.com/zhangyunhao116/umap" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -67,7 +67,9 @@ func NewVectorOperator( returnBool bool, opts *query.Options, ) (model.VectorOperator, error) { - o := &vectorOperator{ + return &vectorOperator{ + OperatorTelemetry: model.NewTelemetry("[vectorBinary]", opts.EnableAnalysis), + pool: pool, lhs: lhs, rhs: rhs, @@ -75,35 +77,20 @@ func NewVectorOperator( opType: opType, returnBool: returnBool, sigFunc: signatureFunc(matching.On, matching.MatchingLabels...), - } - - o.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - o.OperatorTelemetry = &model.TrackedTelemetry{} - } - return o, nil -} - -func (o *vectorOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - 
o.SetName("[*vectorOperator]") - next := make([]model.ObservableVectorOperator, 0, 2) - if obsnextParamOp, ok := o.lhs.(model.ObservableVectorOperator); ok { - next = append(next, obsnextParamOp) - } - if obsnext, ok := o.rhs.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - return o, next + }, nil } func (o *vectorOperator) Explain() (me string, next []model.VectorOperator) { if o.matching.On { - return fmt.Sprintf("[*vectorOperator] %s %v on %v group %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.MatchingLabels, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} + return fmt.Sprintf("[vectorBinary] %s - %v, on: %v, group: %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.MatchingLabels, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} } - return fmt.Sprintf("[*vectorOperator] %s %v ignoring %v group %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.On, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} + return fmt.Sprintf("[vectorBinary] %s - %v, ignoring: %v, group: %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.MatchingLabels, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} } func (o *vectorOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + if err := o.initOnce(ctx); err != nil { return nil, err } @@ -111,6 +98,9 @@ func (o *vectorOperator) Series(ctx context.Context) ([]labels.Labels, error) { } func (o *vectorOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() @@ -299,6 +289,11 @@ func (o *vectorOperator) execBinaryArithmetic(lhs, rhs model.StepVector) (model. return step, errors.Newf("Unexpected matching cardinality: %s", o.matching.Card.String()) } + // shortcut: if we have no samples on the high card side we cannot compute pairings + if len(hcs.Samples) == 0 { + return step, nil + } + for i, sampleID := range lcs.SampleIDs { jp := o.lcJoinBuckets[sampleID] // Hash collisions on the low-card-side would imply a many-to-many relation. @@ -505,6 +500,7 @@ func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { // Lifted from: https://github.com/prometheus/prometheus/blob/a38179c4e183d9b50b271167bf90050eda8ec3d1/promql/engine.go#L2430. // TODO: call with histogram values in followup PR. +// nolint: unparam func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { switch op { case parser.ADD: diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go index 8650f9a476..8c64e0ba23 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go @@ -34,6 +34,8 @@ func (c errorChan) getError() error { // coalesce guarantees that samples from different input vectors will be added to the output in the same order // as the input vectors themselves are provided in NewCoalesce.
type coalesce struct { + model.OperatorTelemetry + once sync.Once series []labels.Labels @@ -46,37 +48,22 @@ type coalesce struct { inVectors [][]model.StepVector // sampleOffsets holds per-operator offsets needed to map an input sample ID to an output sample ID. sampleOffsets []uint64 - model.OperatorTelemetry } func NewCoalesce(pool *model.VectorPool, opts *query.Options, batchSize int64, operators ...model.VectorOperator) model.VectorOperator { - c := &coalesce{ + return &coalesce{ + OperatorTelemetry: model.NewTelemetry("[coalesce]", opts.EnableAnalysis), + pool: pool, sampleOffsets: make([]uint64, len(operators)), operators: operators, inVectors: make([][]model.StepVector, len(operators)), batchSize: batchSize, } - c.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - c.OperatorTelemetry = &model.TrackedTelemetry{} - } - return c -} - -func (c *coalesce) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - c.SetName("[*coalesce]") - obsOperators := make([]model.ObservableVectorOperator, 0, len(c.operators)) - for _, operator := range c.operators { - if obsOperator, ok := operator.(model.ObservableVectorOperator); ok { - obsOperators = append(obsOperators, obsOperator) - } - } - return c, obsOperators } func (c *coalesce) Explain() (me string, next []model.VectorOperator) { - return "[*coalesce]", c.operators + return "[coalesce]", c.operators } func (c *coalesce) GetPool() *model.VectorPool { @@ -84,6 +71,9 @@ func (c *coalesce) GetPool() *model.VectorPool { } func (c *coalesce) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { c.AddExecutionTimeTaken(time.Since(start)) }() + var err error c.once.Do(func() { err = c.loadSeries(ctx) }) if err != nil { @@ -93,13 +83,14 @@ func (c *coalesce) Series(ctx context.Context) ([]labels.Labels, error) { } func (c *coalesce) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { c.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() - defer func() { c.AddExecutionTimeTaken(time.Since(start)) }() var err error c.once.Do(func() { err = c.loadSeries(ctx) }) diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go index 35640582ef..f41bb86dac 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go @@ -10,6 +10,7 @@ import ( "time" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/query" "github.com/prometheus/prometheus/model/labels" ) @@ -27,30 +28,24 @@ type concurrencyOperator struct { model.OperatorTelemetry } -func NewConcurrent(next model.VectorOperator, bufferSize int) model.VectorOperator { - c := &concurrencyOperator{ +func NewConcurrent(next model.VectorOperator, bufferSize int, opts *query.Options) model.VectorOperator { + return &concurrencyOperator{ + OperatorTelemetry: model.NewTelemetry("[concurrent]", opts.EnableAnalysis), + next: next, buffer: make(chan maybeStepVector, bufferSize), bufferSize: bufferSize, } - c.OperatorTelemetry = &model.TrackedTelemetry{} - return c -} - -func (c *concurrencyOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - c.SetName(("[*concurrencyOperator]")) - next := make([]model.ObservableVectorOperator, 0, 1) - if 
obsnext, ok := c.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - return c, next } func (c *concurrencyOperator) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*concurrencyOperator(buff=%v)]", c.bufferSize), []model.VectorOperator{c.next} + return fmt.Sprintf("[concurrent(buff=%v)]", c.bufferSize), []model.VectorOperator{c.next} } func (c *concurrencyOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { c.AddExecutionTimeTaken(time.Since(start)) }() + return c.next.Series(ctx) } @@ -59,13 +54,15 @@ func (c *concurrencyOperator) GetPool() *model.VectorPool { } func (c *concurrencyOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { c.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() c.once.Do(func() { go c.pull(ctx) go c.drainBufferOnCancel(ctx) @@ -78,7 +75,6 @@ func (c *concurrencyOperator) Next(ctx context.Context) ([]model.StepVector, err if r.err != nil { return nil, r.err } - c.AddExecutionTimeTaken(time.Since(start)) return r.stepVector, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go index 7bcbdf3780..36b2d4b3c9 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/query" ) type dedupSample struct { @@ -30,6 +31,8 @@ type dedupCache []dedupSample // if multiple samples with the same ID are present in a StepVector, dedupOperator // will keep the last sample in that vector. type dedupOperator struct { + model.OperatorTelemetry + once sync.Once series []labels.Labels @@ -38,34 +41,25 @@ type dedupOperator struct { // outputIndex is a slice that is used as an index from input sample ID to output sample ID. 
outputIndex []uint64 dedupCache dedupCache - model.OperatorTelemetry -} - -func NewDedupOperator(pool *model.VectorPool, next model.VectorOperator) model.VectorOperator { - d := &dedupOperator{ - next: next, - pool: pool, - } - d.OperatorTelemetry = &model.TrackedTelemetry{} - return d } -func (d *dedupOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - d.SetName("[*dedup]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := d.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) +func NewDedupOperator(pool *model.VectorPool, next model.VectorOperator, opts *query.Options) model.VectorOperator { + return &dedupOperator{ + OperatorTelemetry: model.NewTelemetry("[dedup]", opts.EnableAnalysis), + next: next, + pool: pool, } - return d, next } func (d *dedupOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { d.AddExecutionTimeTaken(time.Since(start)) }() + var err error d.once.Do(func() { err = d.loadSeries(ctx) }) if err != nil { return nil, err } - start := time.Now() in, err := d.next.Next(ctx) if err != nil { @@ -105,12 +99,14 @@ func (d *dedupOperator) Next(ctx context.Context) ([]model.StepVector, error) { } result = append(result, out) } - d.AddExecutionTimeTaken(time.Since(start)) return result, nil } func (d *dedupOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { d.AddExecutionTimeTaken(time.Since(start)) }() + var err error d.once.Do(func() { err = d.loadSeries(ctx) }) if err != nil { @@ -124,7 +120,7 @@ func (d *dedupOperator) GetPool() *model.VectorPool { } func (d *dedupOperator) Explain() (me string, next []model.VectorOperator) { - return "[*dedup]", []model.VectorOperator{d.next} + return "[dedup]", []model.VectorOperator{d.next} } func (d *dedupOperator) loadSeries(ctx context.Context) error { diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go new file mode 100644 index 0000000000..e96845c3b5 --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go @@ -0,0 +1,124 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
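// The constructors above all converge on one telemetry pattern: embed
// model.OperatorTelemetry, pick the implementation once via
// model.NewTelemetry(name, opts.EnableAnalysis) (tracked when analysis is
// enabled, a no-op otherwise), and time Series/Next with one deferred call.
// Note the timer now starts before the ctx.Done() check, so time spent
// waiting on cancellation is attributed to the operator as well. A minimal
// self-contained sketch of the pattern, with stand-in types rather than the
// engine's own:
//
//	type telemetry interface{ AddExecutionTimeTaken(time.Duration) }
//
//	type noopTelemetry struct{}
//
//	func (noopTelemetry) AddExecutionTimeTaken(time.Duration) {}
//
//	type trackedTelemetry struct{ total time.Duration }
//
//	func (t *trackedTelemetry) AddExecutionTimeTaken(d time.Duration) { t.total += d }
//
//	type timedOperator struct {
//		telemetry // embedded, so AddExecutionTimeTaken is promoted
//	}
//
//	func newTimedOperator(enableAnalysis bool) *timedOperator {
//		var tel telemetry = noopTelemetry{}
//		if enableAnalysis {
//			tel = &trackedTelemetry{}
//		}
//		return &timedOperator{telemetry: tel}
//	}
//
//	func (o *timedOperator) Next(ctx context.Context) error {
//		start := time.Now()
//		defer func() { o.AddExecutionTimeTaken(time.Since(start)) }()
//		// ... the operator's actual work ...
//		return ctx.Err()
//	}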
+ +package exchange + +import ( + "context" + "sync" + "time" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/extlabels" + "github.com/thanos-io/promql-engine/query" +) + +type pair struct{ a, b int } + +type duplicateLabelCheckOperator struct { + model.OperatorTelemetry + + once sync.Once + next model.VectorOperator + + p []pair + c []uint64 +} + +func NewDuplicateLabelCheck(next model.VectorOperator, opts *query.Options) model.VectorOperator { + return &duplicateLabelCheckOperator{ + OperatorTelemetry: model.NewTelemetry("[duplicateLabelCheck]", opts.EnableAnalysis), + next: next, + } +} + +func (d *duplicateLabelCheckOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { d.AddExecutionTimeTaken(time.Since(start)) }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if err := d.init(ctx); err != nil { + return nil, err + } + + in, err := d.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + + // TODO: currently there is a bug, we need to reset 'd.c's state + // if the current timestamp changes. With configured BatchSize we + // dont see all samples for a timestamp in the same batch, but this + // logic relies on that. + if len(d.p) > 0 { + for i := range d.p { + d.c[d.p[i].a] = 0 + d.c[d.p[i].b] = 0 + } + for i, sv := range in { + for _, sid := range sv.SampleIDs { + d.c[sid] |= 2 << i + } + } + for i := range d.p { + if d.c[d.p[i].a]&d.c[d.p[i].b] > 0 { + return nil, extlabels.ErrDuplicateLabelSet + } + } + } + + return in, nil +} + +func (d *duplicateLabelCheckOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { d.AddExecutionTimeTaken(time.Since(start)) }() + + if err := d.init(ctx); err != nil { + return nil, err + } + return d.next.Series(ctx) +} + +func (d *duplicateLabelCheckOperator) GetPool() *model.VectorPool { + return d.next.GetPool() +} + +func (d *duplicateLabelCheckOperator) Explain() (me string, next []model.VectorOperator) { + return d.next.Explain() +} + +func (d *duplicateLabelCheckOperator) init(ctx context.Context) error { + var err error + d.once.Do(func() { + series, seriesErr := d.next.Series(ctx) + if seriesErr != nil { + err = seriesErr + return + } + m := make(map[uint64]int, len(series)) + p := make([]pair, 0) + c := make([]uint64, len(series)) + for i := range series { + h := series[i].Hash() + if j, ok := m[h]; ok { + p = append(p, pair{a: i, b: j}) + } else { + m[h] = i + } + } + d.p = p + d.c = c + }) + return err +} diff --git a/vendor/github.com/thanos-io/promql-engine/execution/execution.go b/vendor/github.com/thanos-io/promql-engine/execution/execution.go index d0b3711e6a..c4c2bb59a6 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/execution.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/execution.go @@ -24,9 +24,8 @@ import ( "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" "github.com/thanos-io/promql-engine/execution/aggregate" "github.com/thanos-io/promql-engine/execution/binary" @@ -53,7 +52,6 @@ func New(expr parser.Expr, queryable storage.Queryable, opts *query.Options) (mo End: opts.End.UnixMilli(), Step: opts.Step.Milliseconds(), } 
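// The duplicate-label check above runs in two phases: init hashes every
// output series once and records the index pairs whose label sets collide,
// and Next then marks, per batch, which step vector each suspicious series
// emitted a sample in (one bit per step vector), failing with
// ErrDuplicateLabelSet when both members of a pair share a bit, i.e. both
// produced a sample at the same step. A standalone illustration of the
// bitmask test on made-up data:
//
//	pairs := []struct{ a, b int }{{0, 1}} // series 0 and 1 hash to the same label set
//	presence := make([]uint64, 2)
//	steps := [][]uint64{{0}, {0, 1}} // sample IDs seen in each step vector
//	for i, ids := range steps {
//		for _, id := range ids {
//			presence[id] |= 2 << i // mirrors d.c[sid] |= 2 << i above
//		}
//	}
//	for _, p := range pairs {
//		if presence[p.a]&presence[p.b] > 0 {
//			fmt.Println("duplicate label set") // both series emitted at step 1
//		}
//	}
//
// As the TODO above notes, the presence masks are only valid while all
// samples for a timestamp arrive in the same batch.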
- return newOperator(expr, selectorPool, opts, hints) } @@ -61,164 +59,122 @@ func newOperator(expr parser.Expr, storage *engstore.SelectorPool, opts *query.O switch e := expr.(type) { case *parser.NumberLiteral: return scan.NewNumberLiteralSelector(model.NewVectorPool(opts.StepsBatch), opts, e.Val), nil - - case *parser.VectorSelector: - start, end := getTimeRangesForVectorSelector(e, opts, 0) - hints.Start = start - hints.End = end - filter := storage.GetSelector(start, end, opts.Step.Milliseconds(), e.LabelMatchers, hints) - return newShardedVectorSelector(filter, opts, e.Offset, 0) - case *logicalplan.VectorSelector: - start, end := getTimeRangesForVectorSelector(e.VectorSelector, opts, 0) - hints.Start = start - hints.End = end - selector := storage.GetFilteredSelector(start, end, opts.Step.Milliseconds(), e.LabelMatchers, e.Filters, hints) - return newShardedVectorSelector(selector, opts, e.Offset, e.BatchSize) - + return newVectorSelector(e, storage, opts, hints), nil case *parser.Call: - hints.Func = e.Func.Name - hints.Grouping = nil - hints.By = false - - // TODO(saswatamcode): Range vector result might need new operator - // before it can be non-nested. https://github.com/thanos-io/promql-engine/issues/39 - for i := range e.Args { - switch t := e.Args[i].(type) { - case *parser.SubqueryExpr: - if !opts.EnableSubqueries { - return nil, parse.ErrNotImplemented - } - return newSubqueryFunction(e, t, storage, opts, hints) - case *parser.MatrixSelector: - return newRangeVectorFunction(e, t, storage, opts, hints) - } - } - return newInstantVectorFunction(e, storage, opts, hints) - + return newCall(e, storage, opts, hints) case *parser.AggregateExpr: - hints.Func = e.Op.String() - hints.Grouping = e.Grouping - hints.By = !e.Without - var paramOp model.VectorOperator - - next, err := newOperator(e.Expr, storage, opts, hints) - if err != nil { - return nil, err - } - - if e.Param != nil && e.Param.Type() != parser.ValueTypeString { - paramOp, err = newOperator(e.Param, storage, opts, hints) - if err != nil { - return nil, err - } - } + return newAggregateExpression(e, storage, opts, hints) + case *parser.BinaryExpr: + return newBinaryExpression(e, storage, opts, hints) + case *parser.ParenExpr: + return newOperator(e.Expr, storage, opts, hints) + case *parser.UnaryExpr: + return newUnaryExpression(e, storage, opts, hints) + case *parser.StepInvariantExpr: + return newStepInvariantExpression(e, storage, opts, hints) + case logicalplan.Deduplicate: + return newDeduplication(e, storage, opts, hints) + case logicalplan.RemoteExecution: + return newRemoteExecution(e, opts, hints) + case logicalplan.CheckDuplicateLabels: + return newDuplicateLabelCheck(e, storage, opts, hints) + case logicalplan.Noop: + return noop.NewOperator(), nil + case logicalplan.UserDefinedExpr: + return e.MakeExecutionOperator(model.NewVectorPool(opts.StepsBatch), storage, opts, hints) + default: + return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s (%T)", e, e) + } +} - if e.Op == parser.TOPK || e.Op == parser.BOTTOMK { - next, err = aggregate.NewKHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts) - } else { - next, err = aggregate.NewHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts) - } +func newVectorSelector(e *logicalplan.VectorSelector, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) model.VectorOperator { + start, end := getTimeRangesForVectorSelector(e, opts, 0) + 
hints.Start = start + hints.End = end - if err != nil { - return nil, err - } + offset := e.Offset + batchsize := e.BatchSize + selector := storage.GetFilteredSelector(start, end, opts.Step.Milliseconds(), e.LabelMatchers, e.Filters, hints) + selectTimestamp := e.SelectTimestamp - return exchange.NewConcurrent(next, 2), nil + numShards := runtime.GOMAXPROCS(0) / 2 + if numShards < 1 { + numShards = 1 + } - case *parser.BinaryExpr: - if e.LHS.Type() == parser.ValueTypeScalar || e.RHS.Type() == parser.ValueTypeScalar { - return newScalarBinaryOperator(e, storage, opts, hints) - } + operators := make([]model.VectorOperator, 0, numShards) + for i := 0; i < numShards; i++ { + operator := scan.NewVectorSelector( + model.NewVectorPool(opts.StepsBatch), selector, opts, offset, hints, batchsize, selectTimestamp, i, numShards) + operators = append(operators, exchange.NewConcurrent(operator, 2, opts)) + } - return newVectorBinaryOperator(e, storage, opts, hints) + return exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, batchsize*int64(numShards), operators...) +} - case *parser.ParenExpr: - return newOperator(e.Expr, storage, opts, hints) +func newCall(e *parser.Call, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + hints.Func = e.Func.Name + hints.Grouping = nil + hints.By = false - case *parser.UnaryExpr: - next, err := newOperator(e.Expr, storage, opts, hints) - if err != nil { - return nil, err - } - switch e.Op { - case parser.ADD: - return next, nil - case parser.SUB: - return unary.NewUnaryNegation(next, opts.StepsBatch) - default: - // This shouldn't happen as Op was validated when parsing already - // https://github.com/prometheus/prometheus/blob/v2.38.0/promql/parser/parse.go#L573. - return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s", e) + if e.Func.Name == "absent_over_time" { + return newAbsentOverTimeOperator(e, storage, opts, hints) + } + // TODO(saswatamcode): Range vector result might need new operator + // before it can be non-nested. https://github.com/thanos-io/promql-engine/issues/39 + for i := range e.Args { + switch t := e.Args[i].(type) { + case *parser.SubqueryExpr: + return newSubqueryFunction(e, t, storage, opts, hints) + case *logicalplan.MatrixSelector: + return newRangeVectorFunction(e, t, storage, opts, hints) } + } + return newInstantVectorFunction(e, storage, opts, hints) +} - case *parser.StepInvariantExpr: - switch t := e.Expr.(type) { - case *parser.NumberLiteral: - return scan.NewNumberLiteralSelector(model.NewVectorPool(opts.StepsBatch), opts, t.Val), nil +func newAbsentOverTimeOperator(call *parser.Call, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + switch arg := call.Args[0].(type) { + case *parser.SubqueryExpr: + matrixCall := &parser.Call{ + Func: &parser.Function{Name: "last_over_time"}, } - next, err := newOperator(e.Expr, storage, opts.WithEndTime(opts.Start), hints) + argOp, err := newSubqueryFunction(matrixCall, arg, storage, opts, hints) if err != nil { return nil, err } - return step_invariant.NewStepInvariantOperator(model.NewVectorPoolWithSize(opts.StepsBatch, 1), next, e.Expr, opts) - - case logicalplan.Deduplicate: - // The Deduplicate operator will deduplicate samples using a last-sample-wins strategy. - // Sorting engines by MaxT ensures that samples produced due to - // staleness will be overwritten and corrected by samples coming from - // engines with a higher max time. 
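// newVectorSelector above turns one selector into a parallel plan: the series
// set is split across runtime.GOMAXPROCS(0)/2 shards (at least one), every
// shard is wrapped in a concurrent operator with a small buffer, and a
// coalesce operator merges the shards back, with its batch size scaled by
// the shard count. The same fan-out/merge skeleton, with a hypothetical
// newShard constructor standing in for scan.NewVectorSelector and pool,
// batchSize, opts taken from the surrounding plan:
//
//	numShards := runtime.GOMAXPROCS(0) / 2
//	if numShards < 1 {
//		numShards = 1
//	}
//	operators := make([]model.VectorOperator, 0, numShards)
//	for i := 0; i < numShards; i++ {
//		op := newShard(i, numShards) // hypothetical; builds shard i of numShards
//		operators = append(operators, exchange.NewConcurrent(op, 2, opts))
//	}
//	merged := exchange.NewCoalesce(pool, opts, batchSize*int64(numShards), operators...)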
- sort.Slice(e.Expressions, func(i, j int) bool { - return e.Expressions[i].Engine.MaxT() < e.Expressions[j].Engine.MaxT() - }) - - operators := make([]model.VectorOperator, len(e.Expressions)) - for i, expr := range e.Expressions { - operator, err := newOperator(expr, storage, opts, hints) - if err != nil { - return nil, err - } - operators[i] = operator + f := &parser.Call{ + Func: &parser.Function{Name: "absent"}, + Args: []parser.Expr{matrixCall}, } - coalesce := exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, 0, operators...) - dedup := exchange.NewDedupOperator(model.NewVectorPool(opts.StepsBatch), coalesce) - return exchange.NewConcurrent(dedup, 2), nil - - case logicalplan.RemoteExecution: - // Create a new remote query scoped to the calculated start time. - qry, err := e.Engine.NewRangeQuery(opts.Context, promql.NewPrometheusQueryOpts(false, opts.LookbackDelta), e.Query, e.QueryRangeStart, opts.End, opts.Step) + return function.NewFunctionOperator(f, []model.VectorOperator{argOp}, opts.StepsBatch, opts) + case *logicalplan.MatrixSelector: + matrixCall := &parser.Call{ + Func: &parser.Function{Name: "last_over_time"}, + Args: call.Args, + } + argOp, err := newRangeVectorFunction(matrixCall, arg, storage, opts, hints) if err != nil { return nil, err } - - // The selector uses the original query time to make sure that steps from different - // operators have the same timestamps. - // We need to set the lookback for the selector to 0 since the remote query already applies one lookback. - selectorOpts := *opts - selectorOpts.LookbackDelta = 0 - remoteExec := remote.NewExecution(qry, model.NewVectorPool(opts.StepsBatch), e.QueryRangeStart, &selectorOpts) - return exchange.NewConcurrent(remoteExec, 2), nil - case logicalplan.Noop: - return noop.NewOperator(), nil - case logicalplan.UserDefinedExpr: - return e.MakeExecutionOperator(model.NewVectorPool(opts.StepsBatch), storage, opts, hints) - default: - return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s", e) - } -} - -func unpackVectorSelector(t *parser.MatrixSelector) (int64, *parser.VectorSelector, []*labels.Matcher, error) { - switch t := t.VectorSelector.(type) { - case *parser.VectorSelector: - return 0, t, nil, nil - case *logicalplan.VectorSelector: - return t.BatchSize, t.VectorSelector, t.Filters, nil + f := &parser.Call{ + Func: &parser.Function{Name: "absent"}, + Args: []parser.Expr{&logicalplan.MatrixSelector{ + MatrixSelector: &parser.MatrixSelector{ + VectorSelector: arg.VectorSelector, + Range: arg.Range, + }, + OriginalString: arg.String(), + }}, + } + return function.NewFunctionOperator(f, []model.VectorOperator{argOp}, opts.StepsBatch, opts) default: - return 0, nil, nil, parse.ErrNotSupportedExpr + return nil, parse.ErrNotSupportedExpr } } -func newRangeVectorFunction(e *parser.Call, t *parser.MatrixSelector, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { +func newRangeVectorFunction(e *parser.Call, t *logicalplan.MatrixSelector, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { // TODO(saswatamcode): Range vector result might need new operator // before it can be non-nested. 
https://github.com/thanos-io/promql-engine/issues/39 batchSize, vs, filters, err := unpackVectorSelector(t) @@ -241,14 +197,33 @@ func newRangeVectorFunction(e *parser.Call, t *parser.MatrixSelector, storage *e if numShards < 1 { numShards = 1 } + var arg float64 + if e.Func.Name == "quantile_over_time" { + constVal, err := unwrapConstVal(e.Args[0]) + if err != nil { + return nil, err + } + arg = constVal + } operators := make([]model.VectorOperator, 0, numShards) for i := 0; i < numShards; i++ { - operator, err := scan.NewMatrixSelector(model.NewVectorPool(opts.StepsBatch), filter, e, opts, t.Range, vs.Offset, batchSize, i, numShards) + operator, err := scan.NewMatrixSelector( + model.NewVectorPool(opts.StepsBatch), + filter, + e.Func.Name, + arg, + opts, + t.Range, + vs.Offset, + batchSize, + i, + numShards, + ) if err != nil { return nil, err } - operators = append(operators, exchange.NewConcurrent(operator, 2)) + operators = append(operators, exchange.NewConcurrent(operator, 2, opts)) } return exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, batchSize*int64(numShards), operators...), nil @@ -259,8 +234,8 @@ func newSubqueryFunction(e *parser.Call, t *parser.SubqueryExpr, storage *engsto if parse.IsExtFunction(e.Func.Name) { return nil, parse.ErrNotImplemented } - // TODO: only instant queries for now. - if !opts.IsInstantQuery() { + // TODO: We dont pass arguments yet + if e.Func.Name == "quantile_over_time" { return nil, parse.ErrNotImplemented } nOpts := query.NestedOptionsForSubquery(opts, t) @@ -273,7 +248,13 @@ func newSubqueryFunction(e *parser.Call, t *parser.SubqueryExpr, storage *engsto if err != nil { return nil, err } - return scan.NewSubqueryOperator(model.NewVectorPool(opts.StepsBatch), inner, opts, e, t) + + outerOpts := *opts + if t.Timestamp != nil { + outerOpts.Start = time.UnixMilli(*t.Timestamp) + outerOpts.End = time.UnixMilli(*t.Timestamp) + } + return scan.NewSubqueryOperator(model.NewVectorPool(opts.StepsBatch), inner, &outerOpts, e, t) } func newInstantVectorFunction(e *parser.Call, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { @@ -293,40 +274,63 @@ func newInstantVectorFunction(e *parser.Call, storage *engstore.SelectorPool, op return function.NewFunctionOperator(e, nextOperators, opts.StepsBatch, opts) } -func newShardedVectorSelector(selector engstore.SeriesSelector, opts *query.Options, offset time.Duration, batchSize int64) (model.VectorOperator, error) { - numShards := runtime.GOMAXPROCS(0) / 2 - if numShards < 1 { - numShards = 1 +func newAggregateExpression(e *parser.AggregateExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + hints.Func = e.Op.String() + hints.Grouping = e.Grouping + hints.By = !e.Without + var paramOp model.VectorOperator + + next, err := newOperator(e.Expr, storage, opts, hints) + if err != nil { + return nil, err } - operators := make([]model.VectorOperator, 0, numShards) - for i := 0; i < numShards; i++ { - operator := exchange.NewConcurrent( - scan.NewVectorSelector( - model.NewVectorPool(opts.StepsBatch), selector, opts, offset, batchSize, i, numShards), 2) - operators = append(operators, operator) + + if e.Param != nil && e.Param.Type() != parser.ValueTypeString { + paramOp, err = newOperator(e.Param, storage, opts, hints) + if err != nil { + return nil, err + } } - return exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, batchSize*int64(numShards), operators...), nil 
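// newAbsentOverTimeOperator above implements absent_over_time by rewriting it
// into existing operators rather than adding a dedicated one: the range (or
// subquery) argument is evaluated through a synthetic last_over_time call,
// whose output then feeds a synthetic absent call, so
//
//	absent_over_time(metric[5m])
//
// is executed as if it had been written
//
//	absent(last_over_time(metric[5m]))
//
// which is equivalent: last_over_time yields a sample exactly for the steps
// where the range contains any sample, and absent inverts that presence.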
+ if e.Op == parser.TOPK || e.Op == parser.BOTTOMK { + next, err = aggregate.NewKHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts) + } else { + next, err = aggregate.NewHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts) + } + + if err != nil { + return nil, err + } + + return exchange.NewConcurrent(next, 2, opts), nil + } -func newVectorBinaryOperator(e *parser.BinaryExpr, selectorPool *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { - leftOperator, err := newOperator(e.LHS, selectorPool, opts, hints) +func newBinaryExpression(e *parser.BinaryExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + if e.LHS.Type() == parser.ValueTypeScalar || e.RHS.Type() == parser.ValueTypeScalar { + return newScalarBinaryOperator(e, storage, opts, hints) + } + return newVectorBinaryOperator(e, storage, opts, hints) +} + +func newVectorBinaryOperator(e *parser.BinaryExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + leftOperator, err := newOperator(e.LHS, storage, opts, hints) if err != nil { return nil, err } - rightOperator, err := newOperator(e.RHS, selectorPool, opts, hints) + rightOperator, err := newOperator(e.RHS, storage, opts, hints) if err != nil { return nil, err } return binary.NewVectorOperator(model.NewVectorPool(opts.StepsBatch), leftOperator, rightOperator, e.VectorMatching, e.Op, e.ReturnBool, opts) } -func newScalarBinaryOperator(e *parser.BinaryExpr, selectorPool *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { - lhs, err := newOperator(e.LHS, selectorPool, opts, hints) +func newScalarBinaryOperator(e *parser.BinaryExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + lhs, err := newOperator(e.LHS, storage, opts, hints) if err != nil { return nil, err } - rhs, err := newOperator(e.RHS, selectorPool, opts, hints) + rhs, err := newOperator(e.RHS, storage, opts, hints) if err != nil { return nil, err } @@ -342,8 +346,83 @@ func newScalarBinaryOperator(e *parser.BinaryExpr, selectorPool *engstore.Select return binary.NewScalar(model.NewVectorPoolWithSize(opts.StepsBatch, 1), lhs, rhs, e.Op, scalarSide, e.ReturnBool, opts) } +func newUnaryExpression(e *parser.UnaryExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + next, err := newOperator(e.Expr, storage, opts, hints) + if err != nil { + return nil, err + } + switch e.Op { + case parser.ADD: + return next, nil + case parser.SUB: + return unary.NewUnaryNegation(next, opts) + default: + // This shouldn't happen as Op was validated when parsing already + // https://github.com/prometheus/prometheus/blob/v2.38.0/promql/parser/parse.go#L573. 
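// The subquery change further up fixes @-modified subqueries: when the
// subquery carries an @ timestamp, the outer operator is pinned to that
// instant while the inner plan keeps the original options, e.g. for
//
//	max_over_time(rate(http_requests_total[5m])[30m:1m] @ 1700000000)
//
// the inner rate still runs over the subquery steps, but the outer
// max_over_time is anchored at t=1700000000 regardless of the query range.
// The relevant shape:
//
//	outerOpts := *opts // shallow copy; the inner operator keeps opts as-is
//	if t.Timestamp != nil {
//		outerOpts.Start = time.UnixMilli(*t.Timestamp)
//		outerOpts.End = time.UnixMilli(*t.Timestamp)
//	}
//	return scan.NewSubqueryOperator(model.NewVectorPool(opts.StepsBatch), inner, &outerOpts, e, t)
//
// quantile_over_time over a subquery stays unimplemented for now, since the
// scalar argument is not passed through to nested plans yet.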
+ return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s", e) + } +} + +func newStepInvariantExpression(e *parser.StepInvariantExpr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + switch t := e.Expr.(type) { + case *parser.NumberLiteral: + return scan.NewNumberLiteralSelector(model.NewVectorPool(opts.StepsBatch), opts, t.Val), nil + } + next, err := newOperator(e.Expr, storage, opts.WithEndTime(opts.Start), hints) + if err != nil { + return nil, err + } + return step_invariant.NewStepInvariantOperator(model.NewVectorPoolWithSize(opts.StepsBatch, 1), next, e.Expr, opts) +} + +func newDeduplication(e logicalplan.Deduplicate, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + // The Deduplicate operator will deduplicate samples using a last-sample-wins strategy. + // Sorting engines by MaxT ensures that samples produced due to + // staleness will be overwritten and corrected by samples coming from + // engines with a higher max time. + sort.Slice(e.Expressions, func(i, j int) bool { + return e.Expressions[i].Engine.MaxT() < e.Expressions[j].Engine.MaxT() + }) + + operators := make([]model.VectorOperator, len(e.Expressions)) + for i, expr := range e.Expressions { + operator, err := newOperator(expr, storage, opts, hints) + if err != nil { + return nil, err + } + operators[i] = operator + } + coalesce := exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, 0, operators...) + dedup := exchange.NewDedupOperator(model.NewVectorPool(opts.StepsBatch), coalesce, opts) + return exchange.NewConcurrent(dedup, 2, opts), nil +} + +func newRemoteExecution(e logicalplan.RemoteExecution, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + // Create a new remote query scoped to the calculated start time. + qry, err := e.Engine.NewRangeQuery(opts.Context, promql.NewPrometheusQueryOpts(false, opts.LookbackDelta), e.Query, e.QueryRangeStart, opts.End, opts.Step) + if err != nil { + return nil, err + } + + // The selector uses the original query time to make sure that steps from different + // operators have the same timestamps. + // We need to set the lookback for the selector to 0 since the remote query already applies one lookback. + selectorOpts := *opts + selectorOpts.LookbackDelta = 0 + remoteExec := remote.NewExecution(qry, model.NewVectorPool(opts.StepsBatch), e.QueryRangeStart, &selectorOpts, hints) + return exchange.NewConcurrent(remoteExec, 2, opts), nil +} + +func newDuplicateLabelCheck(e logicalplan.CheckDuplicateLabels, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + op, err := newOperator(e.Expr, storage, opts, hints) + if err != nil { + return nil, err + } + return exchange.NewDuplicateLabelCheck(op, opts), nil +} + // Copy from https://github.com/prometheus/prometheus/blob/v2.39.1/promql/engine.go#L791. 
-func getTimeRangesForVectorSelector(n *parser.VectorSelector, opts *query.Options, evalRange int64) (int64, int64) { +func getTimeRangesForVectorSelector(n *logicalplan.VectorSelector, opts *query.Options, evalRange int64) (int64, int64) { start := opts.Start.UnixMilli() end := opts.End.UnixMilli() if n.Timestamp != nil { @@ -358,3 +437,23 @@ func getTimeRangesForVectorSelector(n *parser.VectorSelector, opts *query.Option offset := n.OriginalOffset.Milliseconds() return start - offset, end - offset } + +func unwrapConstVal(e parser.Expr) (float64, error) { + switch c := e.(type) { + case *parser.NumberLiteral: + return c.Val, nil + case *parser.StepInvariantExpr: + return unwrapConstVal(c.Expr) + } + + return 0, errors.Wrap(parse.ErrNotSupportedExpr, "matrix selector argument must be a constant") +} + +func unpackVectorSelector(t *logicalplan.MatrixSelector) (int64, *logicalplan.VectorSelector, []*labels.Matcher, error) { + switch t := t.VectorSelector.(type) { + case *logicalplan.VectorSelector: + return t.BatchSize, t, t.Filters, nil + default: + return 0, nil, nil, parse.ErrNotSupportedExpr + } +} diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go b/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go index b29f04dda2..f2286f54ed 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go @@ -9,50 +9,62 @@ import ( "time" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" ) type absentOperator struct { + model.OperatorTelemetry + once sync.Once funcExpr *parser.Call series []labels.Labels pool *model.VectorPool next model.VectorOperator - model.OperatorTelemetry } -func (o *absentOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*absentOperator]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := o.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) +func newAbsentOperator( + funcExpr *parser.Call, + pool *model.VectorPool, + next model.VectorOperator, + opts *query.Options, +) *absentOperator { + return &absentOperator{ + OperatorTelemetry: model.NewTelemetry(absentOperatorName, opts.EnableAnalysis), + funcExpr: funcExpr, + pool: pool, + next: next, } - return o, next } func (o *absentOperator) Explain() (me string, next []model.VectorOperator) { - return "[*absentOperator]", []model.VectorOperator{} + return absentOperatorName, []model.VectorOperator{o.next} } func (o *absentOperator) Series(_ context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + o.loadSeries() return o.series, nil } func (o *absentOperator) loadSeries() { + // we need to put the filtered labels back for absent to compute its series properly o.once.Do(func() { o.pool.SetStepSize(1) // https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L1385 var lm []*labels.Matcher switch n := o.funcExpr.Args[0].(type) { - case *parser.VectorSelector: - lm = n.LabelMatchers - case *parser.MatrixSelector: - lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers + case *logicalplan.VectorSelector: + lm = append(n.LabelMatchers, n.Filters...) 
+ case *logicalplan.MatrixSelector: + v := n.VectorSelector.(*logicalplan.VectorSelector) + lm = append(v.LabelMatchers, v.Filters...) default: o.series = []labels.Labels{labels.EmptyLabels()} return @@ -80,13 +92,16 @@ func (o *absentOperator) GetPool() *model.VectorPool { } func (o *absentOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } + o.loadSeries() - start := time.Now() vectors, err := o.next.Next(ctx) if err != nil { @@ -106,6 +121,5 @@ func (o *absentOperator) Next(ctx context.Context) ([]model.StepVector, error) { o.next.GetPool().PutStepVector(vectors[i]) } o.next.GetPool().PutVectors(vectors) - o.AddExecutionTimeTaken(time.Since(start)) return result, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/functions.go b/vendor/github.com/thanos-io/promql-engine/execution/function/functions.go index 06afa52706..5b76502fc1 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/functions.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/functions.go @@ -152,6 +152,15 @@ var instantVectorFuncs = map[string]functionCall{ "year": func(f float64, h *histogram.FloatHistogram, vargs ...float64) (float64, bool) { return year(dateFromSampleValue(f)), true }, + // hack we only have sort functions as argument for "timestamp" possibly so they dont actually + // need to sort anything. This is only for compatibility to prometheus as this sort of query does + // not make too much sense. + "sort": simpleFunc(func(v float64) float64 { + return v + }), + "sort_desc": simpleFunc(func(v float64) float64 { + return v + }), } type noArgFunctionCall func(t int64) float64 diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go b/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go index 306e8ba764..3050badcd8 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go @@ -13,11 +13,11 @@ import ( "github.com/cespare/xxhash/v2" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" "github.com/thanos-io/promql-engine/extlabels" + "github.com/thanos-io/promql-engine/query" ) type histogramSeries struct { @@ -28,12 +28,12 @@ type histogramSeries struct { // histogramOperator is a function operator that calculates percentiles. type histogramOperator struct { - pool *model.VectorPool + model.OperatorTelemetry + once sync.Once + series []labels.Labels + pool *model.VectorPool funcArgs parser.Expressions - - once sync.Once - series []labels.Labels scalarOp model.VectorOperator vectorOp model.VectorOperator @@ -47,27 +47,34 @@ type histogramOperator struct { // seriesBuckets are the buckets for each individual conventional histogram series. 
seriesBuckets []buckets - model.OperatorTelemetry } -func (o *histogramOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*functionOperator]") - next := make([]model.ObservableVectorOperator, 0, 2) - if obsScalarOp, ok := o.scalarOp.(model.ObservableVectorOperator); ok { - next = append(next, obsScalarOp) - } - if obsVectorOp, ok := o.vectorOp.(model.ObservableVectorOperator); ok { - next = append(next, obsVectorOp) +func newHistogramOperator( + pool *model.VectorPool, + funcArgs parser.Expressions, + scalarOp model.VectorOperator, + vectorOp model.VectorOperator, + opts *query.Options, +) *histogramOperator { + return &histogramOperator{ + pool: pool, + funcArgs: funcArgs, + scalarOp: scalarOp, + vectorOp: vectorOp, + scalarPoints: make([]float64, opts.StepsBatch), + OperatorTelemetry: model.NewTelemetry(histogramOperatorName, opts.EnableAnalysis), } - return o, next } func (o *histogramOperator) Explain() (me string, next []model.VectorOperator) { next = []model.VectorOperator{o.scalarOp, o.vectorOp} - return fmt.Sprintf("[*functionOperator] histogram_quantile(%v)", o.funcArgs), next + return fmt.Sprintf("%s(%v)", histogramOperatorName, o.funcArgs), next } func (o *histogramOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + var err error o.once.Do(func() { err = o.loadSeries(ctx) }) if err != nil { @@ -82,12 +89,15 @@ func (o *histogramOperator) GetPool() *model.VectorPool { } func (o *histogramOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() + var err error o.once.Do(func() { err = o.loadSeries(ctx) }) if err != nil { @@ -116,7 +126,6 @@ func (o *histogramOperator) Next(ctx context.Context) ([]model.StepVector, error o.scalarOp.GetPool().PutStepVector(scalar) } o.scalarOp.GetPool().PutVectors(scalars) - o.AddExecutionTimeTaken(time.Since(start)) return o.processInputSeries(vectors) } @@ -174,8 +183,8 @@ func (o *histogramOperator) processInputSeries(vectors []model.StepVector) ([]mo out = append(out, step) o.vectorOp.GetPool().PutStepVector(vector) } - o.vectorOp.GetPool().PutVectors(vectors) + return out, nil } @@ -184,9 +193,6 @@ func (o *histogramOperator) loadSeries(ctx context.Context) error { if err != nil { return err } - if extlabels.ContainsDuplicateLabelSetAfterDroppingName(series) { - return extlabels.ErrDuplicateLabelSet - } var ( hashBuf = make([]byte, 0, 256) @@ -205,13 +211,17 @@ func (o *histogramOperator) loadSeries(ctx context.Context) error { hasBucketValue = false } - lbls, _ = extlabels.DropMetricName(lbls, b) hasher.Reset() hashBuf = lbls.Bytes(hashBuf) if _, err := hasher.Write(hashBuf); err != nil { return err } + // We check for duplicate series after dropped labels when + // showing the result of the query. Series that are equal after + // dropping name should not hash to the same bucket here. 
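// The hunk around this point contains the substantive fix in this file:
// series are now hashed into buckets while __name__ is still present, and
// extlabels.DropMetricName only runs afterwards, so series that become
// identical solely by dropping the metric name no longer collapse into one
// bucket. That also makes the eager
// ContainsDuplicateLabelSetAfterDroppingName check (removed above)
// unnecessary; such conflicts are instead reported by the dedicated
// duplicate-label operator when the query result is materialized.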
+ lbls, _ = extlabels.DropMetricName(lbls, b) + seriesHash := hasher.Sum64() seriesID, ok := seriesHashes[seriesHash] if !ok { diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go b/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go index 9276a431a5..2eba119445 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go @@ -16,6 +16,8 @@ import ( ) type noArgFunctionOperator struct { + model.OperatorTelemetry + mint int64 maxt int64 step int64 @@ -26,20 +28,16 @@ type noArgFunctionOperator struct { vectorPool *model.VectorPool series []labels.Labels sampleIDs []uint64 - model.OperatorTelemetry -} - -func (o *noArgFunctionOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*noArgFunctionOperator]") - return o, []model.ObservableVectorOperator{} } func (o *noArgFunctionOperator) Explain() (me string, next []model.VectorOperator) { - - return fmt.Sprintf("[*noArgFunctionOperator] %v()", o.funcExpr.Func.Name), []model.VectorOperator{} + return fmt.Sprintf("%s %s()", noArgFunctionOperatorName, o.funcExpr.Func.Name), []model.VectorOperator{} } func (o *noArgFunctionOperator) Series(_ context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + return o.series, nil } @@ -47,11 +45,20 @@ func (o *noArgFunctionOperator) GetPool() *model.VectorPool { return o.vectorPool } -func (o *noArgFunctionOperator) Next(_ context.Context) ([]model.StepVector, error) { +func (o *noArgFunctionOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + if o.currentStep > o.maxt { return nil, nil } - start := time.Now() + ret := o.vectorPool.GetVectorBatch() for i := 0; i < o.stepsBatch && o.currentStep <= o.maxt; i++ { sv := o.vectorPool.GetStepVector(o.currentStep) @@ -60,7 +67,6 @@ func (o *noArgFunctionOperator) Next(_ context.Context) ([]model.StepVector, err ret = append(ret, sv) o.currentStep += o.step } - o.AddExecutionTimeTaken(time.Since(start)) return ret, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go index 24a13cc3c6..42333d1894 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go @@ -12,7 +12,6 @@ import ( "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" @@ -21,63 +20,29 @@ import ( "github.com/thanos-io/promql-engine/query" ) -// functionOperator returns []model.StepVector after processing input with desired function. 
-type functionOperator struct { - funcExpr *parser.Call - series []labels.Labels - once sync.Once - - vectorIndex int - nextOps []model.VectorOperator - - call functionCall - scalarPoints [][]float64 - model.OperatorTelemetry -} - -func SetTelemetry(opts *query.Options) model.OperatorTelemetry { - if opts.EnableAnalysis { - return &model.TrackedTelemetry{} - } - return &model.NoopTelemetry{} -} +const ( + absentOperatorName = "[absent]" + functionOperatorName = "[function]" + histogramOperatorName = "[histogram_quantile]" + relabelOperatorName = "[relabel]" + scalarOperatorName = "[scalar]" + timestampOperatorName = "[timestamp]" + noArgFunctionOperatorName = "[noArgFunction]" +) func NewFunctionOperator(funcExpr *parser.Call, nextOps []model.VectorOperator, stepsBatch int, opts *query.Options) (model.VectorOperator, error) { // Some functions need to be handled in special operators - switch funcExpr.Func.Name { case "scalar": - return &scalarFunctionOperator{ - next: nextOps[0], - pool: model.NewVectorPoolWithSize(stepsBatch, 1), - OperatorTelemetry: SetTelemetry(opts), - }, nil - + return newScalarOperator(model.NewVectorPoolWithSize(stepsBatch, 1), nextOps[0], opts), nil + case "timestamp": + return newTimestampOperator(nextOps[0], opts), nil case "label_join", "label_replace": - return &relabelFunctionOperator{ - next: nextOps[0], - funcExpr: funcExpr, - OperatorTelemetry: SetTelemetry(opts), - }, nil - + return newRelabelOperator(nextOps[0], funcExpr, opts), nil case "absent": - return &absentOperator{ - next: nextOps[0], - pool: model.NewVectorPool(stepsBatch), - funcExpr: funcExpr, - OperatorTelemetry: SetTelemetry(opts), - }, nil - + return newAbsentOperator(funcExpr, model.NewVectorPool(stepsBatch), nextOps[0], opts), nil case "histogram_quantile": - return &histogramOperator{ - pool: model.NewVectorPool(stepsBatch), - funcArgs: funcExpr.Args, - once: sync.Once{}, - scalarOp: nextOps[0], - vectorOp: nextOps[1], - scalarPoints: make([]float64, stepsBatch), - OperatorTelemetry: SetTelemetry(opts), - }, nil + return newHistogramOperator(model.NewVectorPool(stepsBatch), funcExpr.Args, nextOps[0], nextOps[1], opts), nil } // Short-circuit functions that take no args. Their only input is the step's timestamp. @@ -101,14 +66,15 @@ func newNoArgsFunctionOperator(funcExpr *parser.Call, stepsBatch int, opts *quer } op := &noArgFunctionOperator{ - currentStep: opts.Start.UnixMilli(), - mint: opts.Start.UnixMilli(), - maxt: opts.End.UnixMilli(), - step: interval, - stepsBatch: stepsBatch, - funcExpr: funcExpr, - call: call, - vectorPool: model.NewVectorPool(stepsBatch), + OperatorTelemetry: model.NewTelemetry(noArgFunctionOperatorName, opts.EnableAnalysis), + currentStep: opts.Start.UnixMilli(), + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + step: interval, + stepsBatch: stepsBatch, + funcExpr: funcExpr, + call: call, + vectorPool: model.NewVectorPool(stepsBatch), } switch funcExpr.Func.Name { case "pi", "time": @@ -118,14 +84,25 @@ func newNoArgsFunctionOperator(funcExpr *parser.Call, stepsBatch int, opts *quer op.series = []labels.Labels{{}} op.sampleIDs = []uint64{0} } - op.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - op.OperatorTelemetry = &model.TrackedTelemetry{} - } return op, nil } +// functionOperator returns []model.StepVector after processing input with desired function. 
+type functionOperator struct { + model.OperatorTelemetry + + funcExpr *parser.Call + series []labels.Labels + once sync.Once + + vectorIndex int + nextOps []model.VectorOperator + + call functionCall + scalarPoints [][]float64 +} + func newInstantVectorFunctionOperator(funcExpr *parser.Call, nextOps []model.VectorOperator, stepsBatch int, opts *query.Options) (model.VectorOperator, error) { call, ok := instantVectorFuncs[funcExpr.Func.Name] if !ok { @@ -137,11 +114,12 @@ func newInstantVectorFunctionOperator(funcExpr *parser.Call, nextOps []model.Vec scalarPoints[i] = make([]float64, len(nextOps)-1) } f := &functionOperator{ - nextOps: nextOps, - call: call, - funcExpr: funcExpr, - vectorIndex: 0, - scalarPoints: scalarPoints, + OperatorTelemetry: model.NewTelemetry(functionOperatorName, opts.EnableAnalysis), + nextOps: nextOps, + call: call, + funcExpr: funcExpr, + vectorIndex: 0, + scalarPoints: scalarPoints, } for i := range funcExpr.Args { @@ -150,10 +128,6 @@ func newInstantVectorFunctionOperator(funcExpr *parser.Call, nextOps []model.Vec break } } - f.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - f.OperatorTelemetry = &model.TrackedTelemetry{} - } // Check selector type. switch funcExpr.Args[f.vectorIndex].Type() { @@ -164,22 +138,14 @@ func newInstantVectorFunctionOperator(funcExpr *parser.Call, nextOps []model.Vec } } -func (o *functionOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*functionOperator]") - obsOperators := make([]model.ObservableVectorOperator, 0, len(o.nextOps)) - for _, operator := range o.nextOps { - if obsOperator, ok := operator.(model.ObservableVectorOperator); ok { - obsOperators = append(obsOperators, obsOperator) - } - } - return o, obsOperators -} - func (o *functionOperator) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*functionOperator] %v(%v)", o.funcExpr.Func.Name, o.funcExpr.Args), o.nextOps + return fmt.Sprintf("%s %v(%v)", functionOperatorName, o.funcExpr.Func.Name, o.funcExpr.Args), o.nextOps } func (o *functionOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + if err := o.loadSeries(ctx); err != nil { return nil, err } @@ -192,16 +158,18 @@ func (o *functionOperator) GetPool() *model.VectorPool { } func (o *functionOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - if err := o.loadSeries(ctx); err != nil { return nil, err } - start := time.Now() + // Process non-variadic single/multi-arg instant vector and scalar input functions. // Call next on vector input. 
vectors, err := o.nextOps[o.vectorIndex].Next(ctx) @@ -261,8 +229,6 @@ func (o *functionOperator) Next(ctx context.Context) ([]model.StepVector, error) } } - o.AddExecutionTimeTaken(time.Since(start)) - return vectors, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go b/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go index 38127f0fbc..3db44b320e 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go @@ -17,47 +17,55 @@ import ( "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/query" ) -type relabelFunctionOperator struct { +type relabelOperator struct { + model.OperatorTelemetry + next model.VectorOperator funcExpr *parser.Call once sync.Once series []labels.Labels - model.OperatorTelemetry } -func (o *relabelFunctionOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*relabelFunctionOperator]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := o.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) +func newRelabelOperator( + next model.VectorOperator, + funcExpr *parser.Call, + opts *query.Options, +) *relabelOperator { + return &relabelOperator{ + OperatorTelemetry: model.NewTelemetry(relabelOperatorName, opts.EnableAnalysis), + next: next, + funcExpr: funcExpr, } - return o, next } -func (o *relabelFunctionOperator) Explain() (me string, next []model.VectorOperator) { - return "[*relabelFunctionOperator]", []model.VectorOperator{} +func (o *relabelOperator) Explain() (me string, next []model.VectorOperator) { + return relabelOperatorName, []model.VectorOperator{} } -func (o *relabelFunctionOperator) Series(ctx context.Context) ([]labels.Labels, error) { +func (o *relabelOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + var err error o.once.Do(func() { err = o.loadSeries(ctx) }) return o.series, err } -func (o *relabelFunctionOperator) GetPool() *model.VectorPool { +func (o *relabelOperator) GetPool() *model.VectorPool { return o.next.GetPool() } -func (o *relabelFunctionOperator) Next(ctx context.Context) ([]model.StepVector, error) { +func (o *relabelOperator) Next(ctx context.Context) ([]model.StepVector, error) { start := time.Now() - next, err := o.next.Next(ctx) - o.AddExecutionTimeTaken(time.Since(start)) - return next, err + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + + return o.next.Next(ctx) } -func (o *relabelFunctionOperator) loadSeries(ctx context.Context) (err error) { +func (o *relabelOperator) loadSeries(ctx context.Context) (err error) { series, err := o.next.Series(ctx) if err != nil { return err @@ -86,7 +94,7 @@ func unwrap(expr parser.Expr) (string, error) { } } -func (o *relabelFunctionOperator) loadSeriesForLabelJoin(series []labels.Labels) error { +func (o *relabelOperator) loadSeriesForLabelJoin(series []labels.Labels) error { labelJoinDst, err := unwrap(o.funcExpr.Args[1]) if err != nil { return errors.Wrap(err, "unable to unwrap string argument") @@ -124,7 +132,7 @@ func (o *relabelFunctionOperator) loadSeriesForLabelJoin(series []labels.Labels) } return nil } -func (o *relabelFunctionOperator) loadSeriesForLabelReplace(series []labels.Labels) error { +func (o *relabelOperator) 
loadSeriesForLabelReplace(series []labels.Labels) error { labelReplaceDst, err := unwrap(o.funcExpr.Args[1]) if err != nil { return errors.Wrap(err, "unable to unwrap string argument") diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go b/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go index 9d6a855317..fc1a77b070 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go @@ -11,42 +11,48 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/query" ) -type scalarFunctionOperator struct { +type scalarOperator struct { pool *model.VectorPool next model.VectorOperator model.OperatorTelemetry } -func (o *scalarFunctionOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*scalarFunctionOperator]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := o.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) +func newScalarOperator(pool *model.VectorPool, next model.VectorOperator, opts *query.Options) *scalarOperator { + return &scalarOperator{ + pool: pool, + next: next, + OperatorTelemetry: model.NewTelemetry(scalarOperatorName, opts.EnableAnalysis), } - return o, next } -func (o *scalarFunctionOperator) Explain() (me string, next []model.VectorOperator) { - return "[*scalarFunctionOperator]", []model.VectorOperator{} +func (o *scalarOperator) Explain() (me string, next []model.VectorOperator) { + return scalarOperatorName, []model.VectorOperator{o.next} } -func (o *scalarFunctionOperator) Series(ctx context.Context) ([]labels.Labels, error) { +func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + return nil, nil } -func (o *scalarFunctionOperator) GetPool() *model.VectorPool { +func (o *scalarOperator) GetPool() *model.VectorPool { return o.pool } -func (o *scalarFunctionOperator) Next(ctx context.Context) ([]model.StepVector, error) { +func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() + in, err := o.next.Next(ctx) if err != nil { return nil, err @@ -67,7 +73,6 @@ func (o *scalarFunctionOperator) Next(ctx context.Context) ([]model.StepVector, o.next.GetPool().PutStepVector(vector) } o.next.GetPool().PutVectors(in) - o.AddExecutionTimeTaken(time.Since(start)) return result, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go b/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go new file mode 100644 index 0000000000..471f367fda --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go @@ -0,0 +1,91 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
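// With Analyze gone, the bracketed names above ([scalar], [relabel],
// [histogram_quantile], ...) are what operators report through Explain,
// which together with the returned children is enough to render a plan
// tree. A small sketch against the Explain signature the operators above
// share (not shipped with the engine):
//
//	func printPlan(op model.VectorOperator, depth int) {
//		name, children := op.Explain()
//		fmt.Printf("%s%s\n", strings.Repeat("  ", depth), name)
//		for _, child := range children {
//			printPlan(child, depth+1)
//		}
//	}
//
// For a sharded selector this prints something like a [coalesce] root with
// one [concurrent(buff=2)] child per shard.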
+ +package function + +import ( + "context" + "sync" + "time" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/extlabels" + "github.com/thanos-io/promql-engine/query" +) + +type timestampOperator struct { + next model.VectorOperator + model.OperatorTelemetry + + series []labels.Labels + once sync.Once +} + +func newTimestampOperator(next model.VectorOperator, opts *query.Options) *timestampOperator { + return ×tampOperator{ + next: next, + OperatorTelemetry: model.NewTelemetry(timestampOperatorName, opts.EnableAnalysis), + } +} + +func (o *timestampOperator) Explain() (me string, next []model.VectorOperator) { + return timestampOperatorName, []model.VectorOperator{o.next} +} + +func (o *timestampOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + return o.series, nil +} + +func (o *timestampOperator) loadSeries(ctx context.Context) error { + var err error + o.once.Do(func() { + series, loadErr := o.next.Series(ctx) + if loadErr != nil { + err = loadErr + return + } + o.series = make([]labels.Labels, len(series)) + + b := labels.ScratchBuilder{} + for i, s := range series { + lbls, _ := extlabels.DropMetricName(s, b) + o.series[i] = lbls + } + }) + + return err +} + +func (o *timestampOperator) GetPool() *model.VectorPool { + return o.next.GetPool() +} + +func (o *timestampOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + in, err := o.next.Next(ctx) + if err != nil { + return nil, err + } + for _, vector := range in { + for i := range vector.Samples { + vector.Samples[i] = float64(vector.T / 1000) + } + } + return in, nil +} diff --git a/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go index 021a36fe43..176377dc95 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go @@ -10,44 +10,51 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -type NoopTelemetry struct{} - -type TrackedTelemetry struct { - name string - ExecutionTime time.Duration +type OperatorTelemetry interface { + AddExecutionTimeTaken(time.Duration) + ExecutionTimeTaken() time.Duration + Name() string } -func (ti *NoopTelemetry) AddExecutionTimeTaken(t time.Duration) {} +func NewTelemetry(name string, enabled bool) OperatorTelemetry { + if enabled { + return NewTrackedTelemetry(name) + } + return NewNoopTelemetry(name) -func (ti *TrackedTelemetry) AddExecutionTimeTaken(t time.Duration) { - ti.ExecutionTime += t } -func (ti *TrackedTelemetry) Name() string { - return ti.name +type NoopTelemetry struct { + name string } -func (ti *TrackedTelemetry) SetName(operatorName string) { - ti.name = operatorName +func NewNoopTelemetry(name string) *NoopTelemetry { + return &NoopTelemetry{name: name} } -func (ti *NoopTelemetry) Name() string { - return "" +func (tm *NoopTelemetry) Name() string { return tm.name } + +func (tm *NoopTelemetry) AddExecutionTimeTaken(t time.Duration) {} + +func (tm *NoopTelemetry) ExecutionTimeTaken() time.Duration { + return time.Duration(0) } -func (ti 
*NoopTelemetry) SetName(operatorName string) {} +type TrackedTelemetry struct { + name string + ExecutionTime time.Duration +} -type OperatorTelemetry interface { - AddExecutionTimeTaken(time.Duration) - ExecutionTimeTaken() time.Duration - SetName(string) - Name() string +func NewTrackedTelemetry(name string) *TrackedTelemetry { + return &TrackedTelemetry{name: name} } -func (ti *NoopTelemetry) ExecutionTimeTaken() time.Duration { - return time.Duration(0) +func (ti *TrackedTelemetry) Name() string { + return ti.name } +func (ti *TrackedTelemetry) AddExecutionTimeTaken(t time.Duration) { ti.ExecutionTime += t } + func (ti *TrackedTelemetry) ExecutionTimeTaken() time.Duration { return ti.ExecutionTime } @@ -55,7 +62,6 @@ func (ti *TrackedTelemetry) ExecutionTimeTaken() time.Duration { type ObservableVectorOperator interface { VectorOperator OperatorTelemetry - Analyze() (OperatorTelemetry, []ObservableVectorOperator) } // VectorOperator performs operations on series in step by step fashion. diff --git a/vendor/github.com/thanos-io/promql-engine/execution/parse/functions.go b/vendor/github.com/thanos-io/promql-engine/execution/parse/functions.go index 64a4630923..0ba8173df6 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/parse/functions.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/parse/functions.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + package parse import ( diff --git a/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go index 5e1928b34b..7e376bcb25 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" "github.com/thanos-io/promql-engine/execution/model" "github.com/thanos-io/promql-engine/execution/scan" @@ -28,35 +29,30 @@ type Execution struct { model.OperatorTelemetry } -func NewExecution(query promql.Query, pool *model.VectorPool, queryRangeStart time.Time, opts *query.Options) *Execution { +func NewExecution(query promql.Query, pool *model.VectorPool, queryRangeStart time.Time, opts *query.Options, hints storage.SelectHints) *Execution { storage := newStorageFromQuery(query, opts) - e := &Execution{ - storage: storage, - query: query, - opts: opts, - queryRangeStart: queryRangeStart, - vectorSelector: scan.NewVectorSelector(pool, storage, opts, 0, 0, 0, 1), + return &Execution{ + storage: storage, + query: query, + opts: opts, + queryRangeStart: queryRangeStart, + vectorSelector: scan.NewVectorSelector(pool, storage, opts, 0, hints, 0, false, 0, 1), + OperatorTelemetry: model.NewTelemetry("[remoteExec]", opts.EnableAnalysis), } - e.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - e.OperatorTelemetry = &model.TrackedTelemetry{} - } - return e -} - -func (e *Execution) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - e.SetName("[*remoteExec]") - return e, nil } func (e *Execution) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { e.AddExecutionTimeTaken(time.Since(start)) }() + return e.vectorSelector.Series(ctx) } func (e *Execution) Next(ctx context.Context) ([]model.StepVector, error) { start := time.Now() + defer func() { 
e.AddExecutionTimeTaken(time.Since(start)) }() + next, err := e.vectorSelector.Next(ctx) - e.AddExecutionTimeTaken(time.Since(start)) if next == nil { // Closing the storage prematurely can lead to results from the query // engine to be recycled. Because of this, we close the storage only @@ -71,7 +67,7 @@ func (e *Execution) GetPool() *model.VectorPool { } func (e *Execution) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*remoteExec] %s (%d, %d)", e.query, e.opts.Start.Unix(), e.opts.End.Unix()), nil + return fmt.Sprintf("[remoteExec] %s (%d, %d)", e.query, e.queryRangeStart.Unix(), e.opts.End.Unix()), nil } type storageAdapter struct { @@ -108,7 +104,6 @@ func (s *storageAdapter) executeQuery(ctx context.Context) { s.err = result.Err return } - switch val := result.Value.(type) { case promql.Matrix: s.series = make([]engstore.SignedSeries, len(val)) diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/functions.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/functions.go index 86f5c381f2..548a656b3e 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/scan/functions.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/functions.go @@ -8,17 +8,21 @@ import ( "github.com/prometheus/prometheus/model/histogram" + "github.com/thanos-io/promql-engine/execution/aggregate" "github.com/thanos-io/promql-engine/execution/parse" + "github.com/thanos-io/promql-engine/ringbuffer" ) -type Sample struct { - T int64 +type Value struct { F float64 H *histogram.FloatHistogram } +type Sample ringbuffer.Sample[Value] +type SamplesBuffer ringbuffer.RingBuffer[Value] + type FunctionArgs struct { - Samples []Sample + Samples []ringbuffer.Sample[Value] StepTime int64 SelectRange int64 ScalarPoints []float64 @@ -28,16 +32,16 @@ type FunctionArgs struct { type FunctionCall func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool) -func instantValue(samples []Sample, isRate bool) (float64, bool) { +func instantValue(samples []ringbuffer.Sample[Value], isRate bool) (float64, bool) { lastSample := samples[len(samples)-1] previousSample := samples[len(samples)-2] var resultValue float64 - if isRate && lastSample.F < previousSample.F { + if isRate && lastSample.V.F < previousSample.V.F { // Counter reset. 
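
The refactor above collapses the old NoopTelemetry/TrackedTelemetry wiring into a single model.NewTelemetry constructor that operators now embed and drive from a timing defer. A minimal sketch of the pattern, assuming only the interface and constructors declared in the operator.go hunk above (exampleOperator and its name are hypothetical):

    package model // sketch only; relies on the declarations from the hunk above

    import "time"

    // exampleOperator shows the embedding pattern this diff applies to the
    // operators: construct telemetry once, then time each call with a defer.
    type exampleOperator struct {
    	OperatorTelemetry
    }

    func newExampleOperator(enableAnalysis bool) *exampleOperator {
    	return &exampleOperator{
    		OperatorTelemetry: NewTelemetry("[example]", enableAnalysis),
    	}
    }

    func (o *exampleOperator) doWork() {
    	start := time.Now()
    	defer func() { o.AddExecutionTimeTaken(time.Since(start)) }()
    	// ... the operator's actual work ...
    }

With EnableAnalysis disabled, the defer still runs but dispatches to the no-op implementation, so call sites need no branching.
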
- resultValue = lastSample.F + resultValue = lastSample.V.F } else { - resultValue = lastSample.F - previousSample.F + resultValue = lastSample.V.F - previousSample.V.F } sampledInterval := lastSample.T - previousSample.T @@ -101,7 +105,7 @@ var rangeVectorFuncs = map[string]FunctionCall{ if len(f.Samples) == 0 { return 0., nil, false } - return f.Samples[len(f.Samples)-1].F, nil, true + return f.Samples[len(f.Samples)-1].V.F, nil, true }, "present_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool) { if len(f.Samples) == 0 { @@ -109,6 +113,16 @@ var rangeVectorFuncs = map[string]FunctionCall{ } return 1., nil, true }, + "quantile_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool) { + if len(f.Samples) == 0 { + return 0., nil, false + } + floats := make([]float64, len(f.Samples)) + for i, sample := range f.Samples { + floats[i] = sample.V.F + } + return aggregate.Quantile(f.ScalarPoints[0], floats), nil, true + }, "changes": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool) { if len(f.Samples) == 0 { return 0., nil, false @@ -214,7 +228,7 @@ func NewRangeVectorFunc(name string) (FunctionCall, error) { // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(samples []Sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (float64, *histogram.FloatHistogram) { +func extrapolatedRate(samples []ringbuffer.Sample[Value], isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (float64, *histogram.FloatHistogram) { var ( rangeStart = stepTime - (selectRange + offset) rangeEnd = stepTime - offset @@ -222,17 +236,17 @@ func extrapolatedRate(samples []Sample, isCounter, isRate bool, stepTime int64, resultHistogram *histogram.FloatHistogram ) - if samples[0].H != nil { + if samples[0].V.H != nil { resultHistogram = histogramRate(samples, isCounter) } else { - resultValue = samples[len(samples)-1].F - samples[0].F + resultValue = samples[len(samples)-1].V.F - samples[0].V.F if isCounter { var lastValue float64 for _, sample := range samples { - if sample.F < lastValue { + if sample.V.F < lastValue { resultValue += lastValue } - lastValue = sample.F + lastValue = sample.V.F } } } @@ -244,7 +258,7 @@ func extrapolatedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sampledInterval := float64(samples[len(samples)-1].T-samples[0].T) / 1000 averageDurationBetweenSamples := sampledInterval / float64(len(samples)-1) - if isCounter && resultValue > 0 && samples[0].F >= 0 { + if isCounter && resultValue > 0 && samples[0].V.F >= 0 { // Counters cannot be negative. If we have any slope at // all (i.e. resultValue went up), we can extrapolate // the zero point of the counter. If the duration to the @@ -252,7 +266,7 @@ func extrapolatedRate(samples []Sample, isCounter, isRate bool, stepTime int64, // take the zero point as the start of the series, // thereby avoiding extrapolation to negative counter // values. 
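
The counter-reset handling above (unchanged in substance by this diff; the Sample fields merely moved under V) computes the raw increase as last minus first, then adds the pre-reset value back wherever a sample drops below its predecessor. A self-contained sketch of just that correction, with hypothetical input values:

    package main

    import "fmt"

    // counterIncrease mirrors the reset correction in extrapolatedRate: start
    // from last-first, then add the pre-reset value back at every drop.
    func counterIncrease(values []float64) float64 {
    	increase := values[len(values)-1] - values[0]
    	var last float64
    	for _, v := range values {
    		if v < last { // counter reset: the counter restarted below last
    			increase += last
    		}
    		last = v
    	}
    	return increase
    }

    func main() {
    	// 5 -> 8, reset, 2 -> 4: the true increase is 3 + 2 + 2 = 7.
    	fmt.Println(counterIncrease([]float64{5, 8, 2, 4})) // 7
    }
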
- durationToZero := sampledInterval * (samples[0].F / resultValue) + durationToZero := sampledInterval * (samples[0].V.F / resultValue) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -293,7 +307,7 @@ func extrapolatedRate(samples []Sample, isCounter, isRate bool, stepTime int64, // It calculates the rate (allowing for counter resets if isCounter is true), // taking into account the last sample before the range start, and returns // the result as either per-second (if isRate is true) or overall. -func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) { +func extendedRate(samples []ringbuffer.Sample[Value], isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) { var ( rangeStart = stepTime - (selectRange + offset) rangeEnd = stepTime - offset @@ -301,7 +315,7 @@ func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sele resultHistogram *histogram.FloatHistogram ) - if samples[0].H != nil { + if samples[0].V.H != nil { // TODO - support extended rate for histograms resultHistogram = histogramRate(samples, isCounter) return resultValue, resultHistogram @@ -309,7 +323,7 @@ func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sele sameVals := true for i := range samples { - if i > 0 && samples[i-1].F != samples[i].F { + if i > 0 && samples[i-1].V.F != samples[i].V.F { sameVals = false break } @@ -321,7 +335,7 @@ func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sele if isCounter && !isRate && sameVals { // Make sure we are not at the end of the range. if stepTime-offset <= until { - return samples[0].F, nil + return samples[0].V.F, nil } } @@ -349,13 +363,13 @@ func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sele if isCounter { for i := firstPoint; i < len(samples); i++ { sample := samples[i] - if sample.F < lastValue { + if sample.V.F < lastValue { counterCorrection += lastValue } - lastValue = sample.F + lastValue = sample.V.F } } - resultValue = samples[len(samples)-1].F - samples[firstPoint].F + counterCorrection + resultValue = samples[len(samples)-1].V.F - samples[firstPoint].V.F + counterCorrection // Duration between last sample and boundary of range. durationToEnd := float64(rangeEnd - samples[len(samples)-1].T) @@ -380,14 +394,14 @@ func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, sele // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is // not a histogram. -func histogramRate(points []Sample, isCounter bool) *histogram.FloatHistogram { +func histogramRate(points []ringbuffer.Sample[Value], isCounter bool) *histogram.FloatHistogram { // Calculating a rate on a single sample is not defined. if len(points) < 2 { return nil } - prev := points[0].H // We already know that this is a histogram. - last := points[len(points)-1].H + prev := points[0].V.H // We already know that this is a histogram. + last := points[len(points)-1].V.H if last == nil { return nil // Range contains a mix of histograms and floats. } @@ -402,7 +416,7 @@ func histogramRate(points []Sample, isCounter bool) *histogram.FloatHistogram { // - Are all data points histograms? // []FloatPoint and a []HistogramPoint separately. 
for _, currPoint := range points[1 : len(points)-1] { - curr := currPoint.H + curr := currPoint.V.H if curr == nil { return nil // Range contains a mix of histograms and floats. } @@ -420,7 +434,7 @@ func histogramRate(points []Sample, isCounter bool) *histogram.FloatHistogram { if isCounter { // Second iteration to deal with counter resets. for _, currPoint := range points[1:] { - curr := currPoint.H + curr := currPoint.V.H if curr.DetectReset(prev) { h.Add(prev) } @@ -431,42 +445,42 @@ func histogramRate(points []Sample, isCounter bool) *histogram.FloatHistogram { return h.Compact(0) } -func maxOverTime(points []Sample) float64 { - max := points[0].F +func maxOverTime(points []ringbuffer.Sample[Value]) float64 { + max := points[0].V.F for _, v := range points { - if v.F > max || math.IsNaN(max) { - max = v.F + if v.V.F > max || math.IsNaN(max) { + max = v.V.F } } return max } -func minOverTime(points []Sample) float64 { - min := points[0].F +func minOverTime(points []ringbuffer.Sample[Value]) float64 { + min := points[0].V.F for _, v := range points { - if v.F < min || math.IsNaN(min) { - min = v.F + if v.V.F < min || math.IsNaN(min) { + min = v.V.F } } return min } -func countOverTime(points []Sample) float64 { +func countOverTime(points []ringbuffer.Sample[Value]) float64 { return float64(len(points)) } -func avgOverTime(points []Sample) float64 { +func avgOverTime(points []ringbuffer.Sample[Value]) float64 { var mean, count, c float64 for _, v := range points { count++ if math.IsInf(mean, 0) { - if math.IsInf(v.F, 0) && (mean > 0) == (v.F > 0) { - // The `mean` and `v.F` values are `Inf` of the same sign. They + if math.IsInf(v.V.F, 0) && (mean > 0) == (v.V.F > 0) { + // The `mean` and `v.V.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `mean` is correct // already. continue } - if !math.IsInf(v.F, 0) && !math.IsNaN(v.F) { + if !math.IsInf(v.V.F, 0) && !math.IsNaN(v.V.F) { // At this stage, the mean is an infinite. If the added // value is neither an Inf or a Nan, we can keep that mean // value. 
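
avgOverTime in the next hunk, and sumOverTime, stddevOverTime and linearRegression below it, all accumulate through kahanSumInc, whose body is not part of this diff. A sketch of the standard compensated-summation increment with the signature implied by those call sites (the exact upstream body may differ):

    package main

    import (
    	"fmt"
    	"math"
    )

    // kahanSumInc adds inc to sum while carrying a compensation term c that
    // captures the low-order bits lost to floating-point rounding; callers
    // fold c back into the sum at the end.
    func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
    	t := sum + inc
    	if math.Abs(sum) >= math.Abs(inc) {
    		c += (sum - t) + inc // low-order digits of inc were lost
    	} else {
    		c += (inc - t) + sum // low-order digits of sum were lost
    	}
    	return t, c
    }

    func main() {
    	var kSum, c, naive float64
    	for _, v := range []float64{1e16, 1, -1e16} {
    		kSum, c = kahanSumInc(v, kSum, c)
    		naive += v
    	}
    	fmt.Println(kSum+c, naive) // 1 0: the naive sum loses the 1
    }
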
@@ -476,7 +490,7 @@ func avgOverTime(points []Sample) float64 { continue } } - mean, c = kahanSumInc(v.F/count-mean/count, mean, c) + mean, c = kahanSumInc(v.V.F/count-mean/count, mean, c) } if math.IsInf(mean, 0) { @@ -485,10 +499,10 @@ func avgOverTime(points []Sample) float64 { return mean + c } -func sumOverTime(points []Sample) float64 { +func sumOverTime(points []ringbuffer.Sample[Value]) float64 { var sum, c float64 for _, v := range points { - sum, c = kahanSumInc(v.F, sum, c) + sum, c = kahanSumInc(v.V.F, sum, c) } if math.IsInf(sum, 0) { return sum @@ -496,38 +510,38 @@ func sumOverTime(points []Sample) float64 { return sum + c } -func stddevOverTime(points []Sample) float64 { +func stddevOverTime(points []ringbuffer.Sample[Value]) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 for _, v := range points { count++ - delta := v.F - (mean + cMean) + delta := v.V.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.F-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(v.V.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) } -func stdvarOverTime(points []Sample) float64 { +func stdvarOverTime(points []ringbuffer.Sample[Value]) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 for _, v := range points { count++ - delta := v.F - (mean + cMean) + delta := v.V.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.F-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(v.V.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count } -func changes(points []Sample) float64 { +func changes(points []ringbuffer.Sample[Value]) float64 { var count float64 - prev := points[0].F + prev := points[0].V.F count = 0 for _, sample := range points[1:] { - current := sample.F + current := sample.V.F if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { count++ } @@ -536,7 +550,7 @@ func changes(points []Sample) float64 { return count } -func deriv(points []Sample) float64 { +func deriv(points []ringbuffer.Sample[Value]) float64 { // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 @@ -544,11 +558,11 @@ func deriv(points []Sample) float64 { return slope } -func resets(points []Sample) float64 { +func resets(points []ringbuffer.Sample[Value]) float64 { count := 0 - prev := points[0].F + prev := points[0].V.F for _, sample := range points[1:] { - current := sample.F + current := sample.V.F if current < prev { count++ } @@ -558,7 +572,7 @@ func resets(points []Sample) float64 { return float64(count) } -func linearRegression(Samples []Sample, interceptTime int64) (slope, intercept float64) { +func linearRegression(Samples []ringbuffer.Sample[Value], interceptTime int64) (slope, intercept float64) { var ( n float64 sumX, cX float64 @@ -568,18 +582,18 @@ func linearRegression(Samples []Sample, interceptTime int64) (slope, intercept f initY float64 constY bool ) - initY = Samples[0].F + initY = Samples[0].V.F constY = true for i, sample := range Samples { // Set constY to false if any new y values are encountered. 
- if constY && i > 0 && sample.F != initY { + if constY && i > 0 && sample.V.F != initY { constY = false } n += 1.0 x := float64(sample.T-interceptTime) / 1e3 sumX, cX = kahanSumInc(x, sumX, cX) - sumY, cY = kahanSumInc(sample.F, sumY, cY) - sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY) + sumY, cY = kahanSumInc(sample.V.F, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.V.F, sumXY, cXY) sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } if constY { @@ -601,10 +615,10 @@ func linearRegression(Samples []Sample, interceptTime int64) (slope, intercept f return slope, intercept } -func filterFloatOnlySamples(samples []Sample) []Sample { +func filterFloatOnlySamples(samples []ringbuffer.Sample[Value]) []ringbuffer.Sample[Value] { i := 0 for _, sample := range samples { - if sample.H == nil { + if sample.V.H == nil { samples[i] = sample i++ } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go index c65192f14f..382e5dd3f3 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go @@ -32,7 +32,9 @@ type numberLiteralSelector struct { } func NewNumberLiteralSelector(pool *model.VectorPool, opts *query.Options, val float64) *numberLiteralSelector { - op := &numberLiteralSelector{ + return &numberLiteralSelector{ + OperatorTelemetry: model.NewTelemetry("[numberLiteral]", opts.EnableAnalysis), + vectorPool: pool, numSteps: opts.NumSteps(), mint: opts.Start.UnixMilli(), @@ -41,25 +43,16 @@ func NewNumberLiteralSelector(pool *model.VectorPool, opts *query.Options, val f currentStep: opts.Start.UnixMilli(), val: val, } - - op.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - op.OperatorTelemetry = &model.TrackedTelemetry{} - } - - return op -} - -func (o *numberLiteralSelector) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*numberLiteralSelector] ") - return o, nil } func (o *numberLiteralSelector) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*numberLiteralSelector] %v", o.val), nil + return fmt.Sprintf("[numberLiteral] %v", o.val), nil } func (o *numberLiteralSelector) Series(context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + o.loadSeries() return o.series, nil } @@ -69,12 +62,14 @@ func (o *numberLiteralSelector) GetPool() *model.VectorPool { } func (o *numberLiteralSelector) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() if o.currentStep > o.maxt { return nil, nil @@ -98,7 +93,6 @@ func (o *numberLiteralSelector) Next(ctx context.Context) ([]model.StepVector, e o.step = 1 } o.currentStep += o.step * int64(o.numSteps) - o.AddExecutionTimeTaken(time.Since(start)) return vectors, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/matrix_selector.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/matrix_selector.go index 94c2ba36ac..0255534431 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/scan/matrix_selector.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/matrix_selector.go @@ -12,35 +12,39 @@ import ( "github.com/efficientgo/core/errors" 
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "golang.org/x/exp/slices" "github.com/thanos-io/promql-engine/execution/function" "github.com/thanos-io/promql-engine/execution/model" engstore "github.com/thanos-io/promql-engine/execution/storage" "github.com/thanos-io/promql-engine/extlabels" "github.com/thanos-io/promql-engine/query" + "github.com/thanos-io/promql-engine/ringbuffer" ) type matrixScanner struct { labels labels.Labels signature uint64 - previousSamples []Sample + previousSamples []ringbuffer.Sample[Value] samples *storage.BufferedSeriesIterator metricAppearedTs *int64 deltaReduced bool } type matrixSelector struct { - funcExpr *parser.Call - storage engstore.SeriesSelector - call FunctionCall - scanners []matrixScanner - series []labels.Labels - once sync.Once + model.OperatorTelemetry - vectorPool *model.VectorPool + vectorPool *model.VectorPool + functionName string + storage engstore.SeriesSelector + scalarArgs []float64 + call FunctionCall + scanners []matrixScanner + bufferTail []ringbuffer.Sample[Value] + series []labels.Labels + once sync.Once numSteps int mint int64 @@ -59,7 +63,6 @@ type matrixSelector struct { // Lookback delta for extended range functions. extLookbackDelta int64 - model.OperatorTelemetry } var ErrNativeHistogramsNotSupported = errors.New("native histograms are not supported in extended range functions") @@ -68,22 +71,27 @@ var ErrNativeHistogramsNotSupported = errors.New("native histograms are not supp func NewMatrixSelector( pool *model.VectorPool, selector engstore.SeriesSelector, - funcExpr *parser.Call, + functionName string, + arg float64, opts *query.Options, selectRange, offset time.Duration, batchSize int64, shard, numShard int, ) (model.VectorOperator, error) { - call, err := NewRangeVectorFunc(funcExpr.Func.Name) + call, err := NewRangeVectorFunc(functionName) if err != nil { return nil, err } - isExtFunction := function.IsExtFunction(funcExpr.Func.Name) + isExtFunction := function.IsExtFunction(functionName) m := &matrixSelector{ - storage: selector, - call: call, - funcExpr: funcExpr, - vectorPool: pool, + OperatorTelemetry: model.NewTelemetry("matrixSelector", opts.EnableAnalysis), + + storage: selector, + call: call, + functionName: functionName, + vectorPool: pool, + scalarArgs: []float64{arg}, + bufferTail: make([]ringbuffer.Sample[Value], 16), numSteps: opts.NumSteps(), mint: opts.Start.UnixMilli(), @@ -101,10 +109,7 @@ func NewMatrixSelector( extLookbackDelta: opts.ExtLookbackDelta.Milliseconds(), } - m.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - m.OperatorTelemetry = &model.TrackedTelemetry{} - } + // For instant queries, set the step to a positive value // so that the operator can terminate. 
if m.step == 0 { @@ -114,20 +119,18 @@ func NewMatrixSelector( return m, nil } -func (o *matrixSelector) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*matrixSelector]") - return o, nil -} - func (o *matrixSelector) Explain() (me string, next []model.VectorOperator) { r := time.Duration(o.selectRange) * time.Millisecond if o.call != nil { - return fmt.Sprintf("[*matrixSelector] %v({%v}[%s] %v mod %v)", o.funcExpr.Func.Name, o.storage.Matchers(), r, o.shard, o.numShards), nil + return fmt.Sprintf("[matrixSelector] %v({%v}[%s] %v mod %v)", o.functionName, o.storage.Matchers(), r, o.shard, o.numShards), nil } - return fmt.Sprintf("[*matrixSelector] {%v}[%s] %v mod %v", o.storage.Matchers(), r, o.shard, o.numShards), nil + return fmt.Sprintf("[matrixSelector] {%v}[%s] %v mod %v", o.storage.Matchers(), r, o.shard, o.numShards), nil } func (o *matrixSelector) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + if err := o.loadSeries(ctx); err != nil { return nil, err } @@ -139,18 +142,18 @@ func (o *matrixSelector) GetPool() *model.VectorPool { } func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() - defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() if o.currentStep > o.maxt { return nil, nil } - if err := o.loadSeries(ctx); err != nil { return nil, err } @@ -175,11 +178,11 @@ func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) { maxt := seriesTs - o.offset mint := maxt - o.selectRange - var rangeSamples []Sample + var rangeSamples []ringbuffer.Sample[Value] var err error if !o.isExtFunction { - rangeSamples, err = selectPoints(series.samples, mint, maxt, series.previousSamples) + rangeSamples, o.bufferTail, err = selectPoints(series.samples, mint, maxt, series.previousSamples, o.bufferTail) } else { rangeSamples, err = selectExtPoints(series.samples, mint, maxt, series.previousSamples, o.extLookbackDelta, &series.metricAppearedTs) } @@ -197,6 +200,7 @@ func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) { StepTime: seriesTs, SelectRange: o.selectRange, Offset: o.offset, + ScalarPoints: o.scalarArgs, MetricAppearedTs: series.metricAppearedTs, }) @@ -245,7 +249,7 @@ func (o *matrixSelector) loadSeries(ctx context.Context) error { b := labels.ScratchBuilder{} for i, s := range series { lbls := s.Labels() - if o.funcExpr.Func.Name != "last_over_time" { + if o.functionName != "last_over_time" { // This modifies the array in place. Because labels.Labels // can be re-used between different Select() calls, it means that // we have to copy it here. @@ -286,7 +290,11 @@ func (o *matrixSelector) loadSeries(ctx context.Context) error { // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. // TODO(fpetkovski): Add max samples limit. -func selectPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Sample) ([]Sample, error) { +func selectPoints( + it *storage.BufferedSeriesIterator, + mint, maxt int64, + out, tail []ringbuffer.Sample[Value], +) ([]ringbuffer.Sample[Value], []ringbuffer.Sample[Value], error) { if len(out) > 0 && out[len(out)-1].T >= mint { // There is an overlap between previous and current ranges, retain common // points. 
In most such cases: @@ -296,7 +304,11 @@ func selectPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Sa var drop int for drop = 0; out[drop].T < mint; drop++ { } + // Rotate the slice around drop and reduce the length to remove samples. + tail = slices.Grow(tail, drop)[:drop] + copy(tail, out[:drop]) copy(out, out[drop:]) + copy(out[len(out)-drop:], tail) out = out[:len(out)-drop] // Only append points with timestamps after the last timestamp we have. mint = out[len(out)-1].T + 1 @@ -307,7 +319,7 @@ func selectPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Sa soughtValueType := it.Seek(maxt) if soughtValueType == chunkenc.ValNone { if it.Err() != nil { - return nil, it.Err() + return nil, tail, it.Err() } } @@ -318,12 +330,19 @@ loop: case chunkenc.ValNone: break loop case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: - t, fh := buf.AtFloatHistogram() - if value.IsStaleNaN(fh.Sum) { - continue loop - } + t := buf.AtT() if t >= mint { - out = append(out, Sample{T: t, H: fh}) + n := len(out) + if cap(out) > n { + out = out[:len(out)+1] + } else { + out = append(out, ringbuffer.Sample[Value]{}) + } + out[n].T, out[n].V.H = buf.AtFloatHistogram(out[n].V.H) + if value.IsStaleNaN(out[n].V.H.Sum) { + out = out[:n] + continue loop + } } case chunkenc.ValFloat: t, v := buf.At() @@ -332,7 +351,13 @@ loop: } // Values in the buffer are guaranteed to be smaller than maxt. if t >= mint { - out = append(out, Sample{T: t, F: v}) + n := len(out) + if cap(out) > n { + out = out[:len(out)+1] + } else { + out = append(out, ringbuffer.Sample[Value]{}) + } + out[n].T, out[n].V.F, out[n].V.H = t, v, nil } } } @@ -340,18 +365,34 @@ loop: // The sought sample might also be in the range. switch soughtValueType { case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: - t, fh := it.AtFloatHistogram() - if t == maxt && !value.IsStaleNaN(fh.Sum) { - out = append(out, Sample{T: t, H: fh}) + t := it.AtT() + if t != maxt { + break + } + _, fh := it.AtFloatHistogram() + if !value.IsStaleNaN(fh.Sum) { + n := len(out) + if cap(out) > n { + out = out[:len(out)+1] + } else { + out = append(out, ringbuffer.Sample[Value]{}) + } + out[n].T, out[n].V.H = t, fh.Copy() } case chunkenc.ValFloat: t, v := it.At() if t == maxt && !value.IsStaleNaN(v) { - out = append(out, Sample{T: t, F: v}) + n := len(out) + if cap(out) > n { + out = out[:len(out)+1] + } else { + out = append(out, ringbuffer.Sample[Value]{}) + } + out[n].T, out[n].V.F, out[n].V.H = t, v, nil } } - return out, nil + return out, tail, nil } // matrixIterSlice populates a matrix vector covering the requested range for a @@ -363,7 +404,7 @@ loop: // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. // TODO(fpetkovski): Add max samples limit. 
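
The reworked selectPoints above retains overlapping samples by rotating the slice around drop rather than shifting and truncating: the dropped head is parked in the tail slots of the same backing array, so later appends can reuse that storage (including any FloatHistogram pointers sitting there). The trick in isolation, assuming nothing beyond golang.org/x/exp/slices:

    package main

    import (
    	"fmt"

    	"golang.org/x/exp/slices"
    )

    // rotateDrop removes the first drop elements while moving them into the
    // tail slots of the same backing array, mirroring the slices.Grow/copy
    // sequence in selectPoints above.
    func rotateDrop[T any](out, tail []T, drop int) ([]T, []T) {
    	tail = slices.Grow(tail, drop)[:drop]
    	copy(tail, out[:drop])          // stash the dropped head
    	copy(out, out[drop:])           // shift retained elements left
    	copy(out[len(out)-drop:], tail) // park the stashed head at the tail
    	return out[:len(out)-drop], tail
    }

    func main() {
    	out := []int{1, 2, 3, 4, 5}
    	out, _ = rotateDrop(out, nil, 2)
    	fmt.Println(out, out[:cap(out)][3:]) // [3 4 5] [1 2]
    }
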
-func selectExtPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Sample, extLookbackDelta int64, metricAppearedTs **int64) ([]Sample, error) { +func selectExtPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []ringbuffer.Sample[Value], extLookbackDelta int64, metricAppearedTs **int64) ([]ringbuffer.Sample[Value], error) { extMint := mint - extLookbackDelta selectsNativeHistograms := false @@ -410,15 +451,22 @@ loop: break loop case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: selectsNativeHistograms = true - t, fh := buf.AtFloatHistogram() - if value.IsStaleNaN(fh.Sum) { - continue loop - } - if *metricAppearedTs == nil { - *metricAppearedTs = &t - } + t := buf.AtT() if t >= mint { - out = append(out, Sample{T: t, H: fh}) + n := len(out) + if cap(out) > n { + out = out[:len(out)+1] + } else { + out = append(out, ringbuffer.Sample[Value]{}) + } + out[n].T, out[n].V.H = buf.AtFloatHistogram(out[n].V.H) + + if value.IsStaleNaN(out[n].V.H.Sum) { + continue loop + } + if *metricAppearedTs == nil { + *metricAppearedTs = &t + } } case chunkenc.ValFloat: t, v := buf.At() @@ -433,10 +481,10 @@ loop: // exists at or before range start, add it and then keep replacing // it with later points while not yet (strictly) inside the range. if t >= mint || !appendedPointBeforeMint { - out = append(out, Sample{T: t, F: v}) + out = append(out, ringbuffer.Sample[Value]{T: t, V: Value{F: v}}) appendedPointBeforeMint = true } else { - out[len(out)-1] = Sample{T: t, F: v} + out[len(out)-1] = ringbuffer.Sample[Value]{T: t, V: Value{F: v}} } } @@ -451,7 +499,7 @@ loop: if *metricAppearedTs == nil { *metricAppearedTs = &t } - out = append(out, Sample{T: t, H: fh}) + out = append(out, ringbuffer.Sample[Value]{T: t, V: Value{H: fh}}) } case chunkenc.ValFloat: t, v := it.At() @@ -459,7 +507,7 @@ loop: if *metricAppearedTs == nil { *metricAppearedTs = &t } - out = append(out, Sample{T: t, F: v}) + out = append(out, ringbuffer.Sample[Value]{T: t, V: Value{F: v}}) } } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go index de31130819..df9057d585 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "sync" + "time" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" @@ -14,23 +15,30 @@ import ( "github.com/thanos-io/promql-engine/execution/model" "github.com/thanos-io/promql-engine/extlabels" "github.com/thanos-io/promql-engine/query" + "github.com/thanos-io/promql-engine/ringbuffer" ) -// TODO: only instant subqueries for now. 
type subqueryOperator struct { + model.OperatorTelemetry + next model.VectorOperator pool *model.VectorPool call FunctionCall mint int64 maxt int64 currentStep int64 + step int64 + stepsBatch int funcExpr *parser.Call subQuery *parser.SubqueryExpr onceSeries sync.Once series []labels.Labels - acc [][]Sample + + lastVectors []model.StepVector + lastCollected int + buffers []*ringbuffer.RingBuffer[Value] } func NewSubqueryOperator(pool *model.VectorPool, next model.VectorOperator, opts *query.Options, funcExpr *parser.Call, subQuery *parser.SubqueryExpr) (model.VectorOperator, error) { @@ -38,25 +46,37 @@ func NewSubqueryOperator(pool *model.VectorPool, next model.VectorOperator, opts if err != nil { return nil, err } + step := opts.Step.Milliseconds() + if step == 0 { + step = 1 + } return &subqueryOperator{ - next: next, - call: call, - pool: pool, - funcExpr: funcExpr, - subQuery: subQuery, - mint: opts.Start.UnixMilli(), - maxt: opts.End.UnixMilli(), - currentStep: opts.Start.UnixMilli(), + OperatorTelemetry: model.NewTelemetry("[subquery]", opts.EnableAnalysis), + + next: next, + call: call, + pool: pool, + funcExpr: funcExpr, + subQuery: subQuery, + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + currentStep: opts.Start.UnixMilli(), + step: step, + stepsBatch: opts.StepsBatch, + lastCollected: -1, }, nil } func (o *subqueryOperator) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*subqueryOperator] %v()", o.funcExpr.Func.Name), []model.VectorOperator{o.next} + return fmt.Sprintf("[subquery] %v()", o.funcExpr.Func.Name), []model.VectorOperator{o.next} } func (o *subqueryOperator) GetPool() *model.VectorPool { return o.pool } func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.OperatorTelemetry.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() @@ -69,48 +89,96 @@ func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error) return nil, err } -ACC: - for { - vectors, err := o.next.Next(ctx) - if err != nil { - return nil, err + res := o.pool.GetVectorBatch() + for i := 0; o.currentStep <= o.maxt && i < o.stepsBatch; i++ { + mint := o.currentStep - o.subQuery.Range.Milliseconds() - o.subQuery.OriginalOffset.Milliseconds() + maxt := o.currentStep - o.subQuery.OriginalOffset.Milliseconds() + for _, b := range o.buffers { + b.DropBefore(mint) } - if len(vectors) == 0 { - break ACC + if len(o.lastVectors) > 0 { + for _, v := range o.lastVectors[o.lastCollected+1:] { + if v.T > maxt { + break + } + o.collect(v, mint) + o.lastCollected++ + } + if o.lastCollected == len(o.lastVectors)-1 { + o.next.GetPool().PutVectors(o.lastVectors) + o.lastVectors = nil + o.lastCollected = -1 + } } - for _, vector := range vectors { - for j, s := range vector.Samples { - o.acc[vector.SampleIDs[j]] = append(o.acc[vector.SampleIDs[j]], Sample{T: vector.T, F: s}) + + ACC: + for len(o.lastVectors) == 0 { + vectors, err := o.next.Next(ctx) + if err != nil { + return nil, err } - o.next.GetPool().PutStepVector(vector) + if len(vectors) == 0 { + break ACC + } + for j, vector := range vectors { + if vector.T > maxt { + o.lastVectors = vectors + o.lastCollected = j - 1 + break ACC + } + o.collect(vector, mint) + } + o.next.GetPool().PutVectors(vectors) } - o.next.GetPool().PutVectors(vectors) - } - res := o.pool.GetVectorBatch() - sv := o.pool.GetStepVector(o.currentStep) - for sampleId, rangeSamples := range o.acc { - f, h, ok := 
o.call(FunctionArgs{ - Samples: rangeSamples, - StepTime: o.currentStep, - SelectRange: o.subQuery.Range.Milliseconds(), - Offset: o.subQuery.Offset.Milliseconds(), - }) - if ok { - if h != nil { - sv.AppendHistogram(o.pool, uint64(sampleId), h) - } else { - sv.AppendSample(o.pool, uint64(sampleId), f) + sv := o.pool.GetStepVector(o.currentStep) + for sampleId, rangeSamples := range o.buffers { + f, h, ok := o.call(FunctionArgs{ + Samples: rangeSamples.Samples(), + StepTime: maxt, + SelectRange: o.subQuery.Range.Milliseconds(), + }) + if ok { + if h != nil { + sv.AppendHistogram(o.pool, uint64(sampleId), h) + } else { + sv.AppendSample(o.pool, uint64(sampleId), f) + } } } - } - res = append(res, sv) + res = append(res, sv) - o.currentStep++ + o.currentStep += o.step + } return res, nil } +func (o *subqueryOperator) collect(v model.StepVector, mint int64) { + if v.T < mint { + o.next.GetPool().PutStepVector(v) + return + } + for i, s := range v.Samples { + buffer := o.buffers[v.SampleIDs[i]] + if buffer.Len() > 0 && v.T <= buffer.MaxT() { + continue + } + buffer.Push(v.T, Value{F: s}) + } + for i, s := range v.Histograms { + buffer := o.buffers[v.HistogramIDs[i]] + if buffer.Len() > 0 && v.T < buffer.MaxT() { + continue + } + buffer.Push(v.T, Value{H: s}) + } + o.next.GetPool().PutStepVector(v) +} + func (o *subqueryOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.OperatorTelemetry.AddExecutionTimeTaken(time.Since(start)) }() + if err := o.initSeries(ctx); err != nil { return nil, err } @@ -127,7 +195,10 @@ func (o *subqueryOperator) initSeries(ctx context.Context) error { } o.series = make([]labels.Labels, len(series)) - o.acc = make([][]Sample, len(series)) + o.buffers = make([]*ringbuffer.RingBuffer[Value], len(series)) + for i := range o.buffers { + o.buffers[i] = ringbuffer.New[Value](8) + } var b labels.ScratchBuilder for i, s := range series { lbls := s @@ -136,6 +207,7 @@ func (o *subqueryOperator) initSeries(ctx context.Context) error { } o.series[i] = lbls } + o.pool.SetStepSize(len(o.series)) }) return err } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/vector_selector.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/vector_selector.go index 8337269aed..ee21ef3279 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/scan/vector_selector.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/vector_selector.go @@ -10,7 +10,6 @@ import ( "time" "github.com/efficientgo/core/errors" - "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/thanos-io/promql-engine/execution/model" engstore "github.com/thanos-io/promql-engine/execution/storage" @@ -19,8 +18,8 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) type vectorScanner struct { @@ -52,6 +51,8 @@ type vectorSelector struct { shard int numShards int + + selectTimestamp bool } // NewVectorSelector creates operator which selects vector of series. 
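
The subquery operator above replaces the old per-series acc slices with one ring buffer per series: collect pushes samples in timestamp order, DropBefore evicts everything left of the window, and Samples feeds the range function. A toy model of that buffer, covering only the methods visible at the call sites (the real ringbuffer package reuses storage rather than reslicing):

    package main

    import "fmt"

    // Sample pairs a timestamp with a generic value, like ringbuffer.Sample[T].
    type Sample[T any] struct {
    	T int64
    	V T
    }

    // RingBuffer is a pared-down stand-in for ringbuffer.RingBuffer[T].
    type RingBuffer[T any] struct{ items []Sample[T] }

    func (r *RingBuffer[T]) Push(t int64, v T) {
    	r.items = append(r.items, Sample[T]{T: t, V: v})
    }

    // DropBefore evicts all samples older than mint; this is how the subquery
    // operator slides its window forward one step at a time.
    func (r *RingBuffer[T]) DropBefore(mint int64) {
    	i := 0
    	for i < len(r.items) && r.items[i].T < mint {
    		i++
    	}
    	r.items = r.items[i:]
    }

    func (r *RingBuffer[T]) Len() int             { return len(r.items) }
    func (r *RingBuffer[T]) MaxT() int64          { return r.items[len(r.items)-1].T }
    func (r *RingBuffer[T]) Samples() []Sample[T] { return r.items }

    func main() {
    	var b RingBuffer[float64]
    	for t := int64(0); t <= 4; t++ {
    		b.Push(t*1000, float64(t))
    	}
    	b.DropBefore(2000) // window moved to [2s, 4s]
    	fmt.Println(b.Len(), b.Samples()) // 3 [{2000 2} {3000 3} {4000 4}]
    }
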
@@ -60,13 +61,16 @@ func NewVectorSelector( selector engstore.SeriesSelector, queryOpts *query.Options, offset time.Duration, + hints storage.SelectHints, batchSize int64, + selectTimestamp bool, shard, numShards int, ) model.VectorOperator { o := &vectorSelector{ - OperatorTelemetry: &model.NoopTelemetry{}, - storage: selector, - vectorPool: pool, + OperatorTelemetry: model.NewTelemetry("[vectorSelector]", queryOpts.EnableAnalysis), + + storage: selector, + vectorPool: pool, mint: queryOpts.Start.UnixMilli(), maxt: queryOpts.End.UnixMilli(), @@ -79,9 +83,8 @@ func NewVectorSelector( shard: shard, numShards: numShards, - } - if queryOpts.EnableAnalysis { - o.OperatorTelemetry = &model.TrackedTelemetry{} + + selectTimestamp: selectTimestamp, } // For instant queries, set the step to a positive value // so that the operator can terminate. @@ -92,16 +95,14 @@ func NewVectorSelector( return o } -func (o *vectorSelector) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - o.SetName("[*vectorSelector]") - return o, nil -} - func (o *vectorSelector) Explain() (me string, next []model.VectorOperator) { - return fmt.Sprintf("[*vectorSelector] {%v} %v mod %v", o.storage.Matchers(), o.shard, o.numShards), nil + return fmt.Sprintf("[vectorSelector] {%v} %v mod %v", o.storage.Matchers(), o.shard, o.numShards), nil } func (o *vectorSelector) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + if err := o.loadSeries(ctx); err != nil { return nil, err } @@ -113,6 +114,9 @@ func (o *vectorSelector) GetPool() *model.VectorPool { } func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() @@ -122,8 +126,6 @@ func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) { return nil, nil } - start := time.Now() - defer func() { o.AddExecutionTimeTaken(time.Since(start)) }() if err := o.loadSeries(ctx); err != nil { return nil, err } @@ -144,12 +146,15 @@ func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) { seriesTs = ts ) for currStep := 0; currStep < o.numSteps && seriesTs <= o.maxt; currStep++ { - _, v, h, ok, err := selectPoint(series.samples, seriesTs, o.lookbackDelta, o.offset) + t, v, h, ok, err := selectPoint(series.samples, seriesTs, o.lookbackDelta, o.offset) if err != nil { return nil, err } + if o.selectTimestamp { + v = float64(t) / 1000 + } if ok { - if h != nil { + if h != nil && !o.selectTimestamp { vectors[currStep].AppendHistogram(o.vectorPool, series.signature, h) } else { vectors[currStep].AppendSample(o.vectorPool, series.signature, v) @@ -162,7 +167,6 @@ func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) { o.currentStep += o.step * int64(o.numSteps) o.currentSeries = 0 } - return vectors, nil } @@ -175,6 +179,7 @@ func (o *vectorSelector) loadSeries(ctx context.Context) error { return } + b := labels.NewBuilder(labels.EmptyLabels()) o.scanners = make([]vectorScanner, len(series)) o.series = make([]labels.Labels, len(series)) for i, s := range series { @@ -183,7 +188,13 @@ func (o *vectorSelector) loadSeries(ctx context.Context) error { signature: s.Signature, samples: storage.NewMemoizedIterator(s.Iterator(nil), o.lookbackDelta), } - o.series[i] = s.Labels() + b.Reset(s.Labels()) + // if we have pushed down a timestamp function into the scan we 
need to drop + // the __name__ label + if o.selectTimestamp { + b.Del(labels.MetricName) + } + o.series[i] = b.Labels() } numSeries := int64(len(o.series)) diff --git a/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go b/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go index 5654b40778..e4126148b8 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go @@ -10,10 +10,10 @@ import ( "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/logicalplan" "github.com/thanos-io/promql-engine/query" ) @@ -35,17 +35,8 @@ type stepInvariantOperator struct { model.OperatorTelemetry } -func (u *stepInvariantOperator) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - u.SetName("[*stepInvariantOperator]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := u.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - return u, next -} - func (u *stepInvariantOperator) Explain() (me string, next []model.VectorOperator) { - return "[*stepInvariantOperator]", []model.VectorOperator{u.next} + return "[stepInvariant]", []model.VectorOperator{u.next} } func NewStepInvariantOperator( @@ -54,36 +45,36 @@ func NewStepInvariantOperator( expr parser.Expr, opts *query.Options, ) (model.VectorOperator, error) { - interval := opts.Step.Milliseconds() // We set interval to be at least 1. - if interval == 0 { - interval = 1 - } u := &stepInvariantOperator{ + OperatorTelemetry: model.NewTelemetry("[stepInvariant]", opts.EnableAnalysis), + vectorPool: pool, next: next, currentStep: opts.Start.UnixMilli(), mint: opts.Start.UnixMilli(), maxt: opts.End.UnixMilli(), - step: interval, + step: opts.Step.Milliseconds(), stepsBatch: opts.StepsBatch, cacheResult: true, } + if u.step == 0 { + u.step = 1 + } // We do not duplicate results for range selectors since result is a matrix // with their unique timestamps which does not depend on the step. 
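
The selectTimestamp flag above implements the timestamp() pushdown: rather than evaluating a separate operator over the selector's output, the selector itself substitutes each sample's timestamp (in seconds) for its value and strips __name__ from the series labels. A small sketch of both steps, using only the labels API calls already present in the diff:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/labels"
    )

    func main() {
    	lbls := labels.FromStrings(labels.MetricName, "up", "job", "node")

    	b := labels.NewBuilder(labels.EmptyLabels())
    	b.Reset(lbls)
    	b.Del(labels.MetricName) // timestamp() output carries no metric name

    	tMillis := int64(1_700_000_000_123)
    	v := float64(tMillis) / 1000 // the sample value becomes seconds

    	fmt.Println(b.Labels(), v) // {job="node"} 1.700000000123e+09
    }
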
switch expr.(type) { - case *parser.MatrixSelector, *parser.SubqueryExpr: + case *logicalplan.MatrixSelector, *parser.SubqueryExpr: u.cacheResult = false } - u.OperatorTelemetry = &model.NoopTelemetry{} - if opts.EnableAnalysis { - u.OperatorTelemetry = &model.TrackedTelemetry{} - } return u, nil } func (u *stepInvariantOperator) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { u.AddExecutionTimeTaken(time.Since(start)) }() + var err error u.seriesOnce.Do(func() { u.series, err = u.next.Series(ctx) @@ -100,10 +91,8 @@ func (u *stepInvariantOperator) GetPool() *model.VectorPool { } func (u *stepInvariantOperator) Next(ctx context.Context) ([]model.StepVector, error) { - if u.currentStep > u.maxt { - return nil, nil - } start := time.Now() + defer func() { u.AddExecutionTimeTaken(time.Since(start)) }() select { case <-ctx.Done(): @@ -111,6 +100,10 @@ func (u *stepInvariantOperator) Next(ctx context.Context) ([]model.StepVector, e default: } + if u.currentStep > u.maxt { + return nil, nil + } + if !u.cacheResult { return u.next.Next(ctx) } @@ -127,7 +120,6 @@ func (u *stepInvariantOperator) Next(ctx context.Context) ([]model.StepVector, e result = append(result, outVector) u.currentStep += u.step } - u.AddExecutionTimeTaken(time.Since(start)) return result, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go b/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go index e1a0a18aa2..4200752a9e 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go @@ -12,6 +12,7 @@ import ( "gonum.org/v1/gonum/floats" "github.com/thanos-io/promql-engine/execution/model" + "github.com/thanos-io/promql-engine/query" ) type unaryNegation struct { @@ -22,31 +23,23 @@ type unaryNegation struct { model.OperatorTelemetry } -func (u *unaryNegation) Explain() (me string, next []model.VectorOperator) { - return "[*unaryNegation]", []model.VectorOperator{u.next} -} - -func NewUnaryNegation( - next model.VectorOperator, - stepsBatch int, -) (model.VectorOperator, error) { +func NewUnaryNegation(next model.VectorOperator, opts *query.Options) (model.VectorOperator, error) { u := &unaryNegation{ next: next, - OperatorTelemetry: &model.TrackedTelemetry{}, + OperatorTelemetry: model.NewTelemetry("[unaryNegation]", opts.EnableAnalysis), } return u, nil } -func (u *unaryNegation) Analyze() (model.OperatorTelemetry, []model.ObservableVectorOperator) { - u.SetName("[*unaryNegation]") - next := make([]model.ObservableVectorOperator, 0, 1) - if obsnext, ok := u.next.(model.ObservableVectorOperator); ok { - next = append(next, obsnext) - } - return u, next + +func (u *unaryNegation) Explain() (me string, next []model.VectorOperator) { + return "[unaryNegation]", []model.VectorOperator{u.next} } func (u *unaryNegation) Series(ctx context.Context) ([]labels.Labels, error) { + start := time.Now() + defer func() { u.AddExecutionTimeTaken(time.Since(start)) }() + if err := u.loadSeries(ctx); err != nil { return nil, err } @@ -75,12 +68,15 @@ func (u *unaryNegation) GetPool() *model.VectorPool { } func (u *unaryNegation) Next(ctx context.Context) ([]model.StepVector, error) { + start := time.Now() + defer func() { u.AddExecutionTimeTaken(time.Since(start)) }() + select { case <-ctx.Done(): return nil, ctx.Err() default: } - start := time.Now() + in, err := u.next.Next(ctx) if err != nil { return nil, err @@ -91,6 +87,5 @@ func (u *unaryNegation) 
Next(ctx context.Context) ([]model.StepVector, error) { for i := range in { floats.Scale(-1, in[i].Samples) } - u.AddExecutionTimeTaken(time.Since(start)) return in, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/execution/warnings/context.go b/vendor/github.com/thanos-io/promql-engine/execution/warnings/context.go index 772009de4b..2472adab5e 100644 --- a/vendor/github.com/thanos-io/promql-engine/execution/warnings/context.go +++ b/vendor/github.com/thanos-io/promql-engine/execution/warnings/context.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + package warnings import ( diff --git a/vendor/github.com/thanos-io/promql-engine/extlabels/labels.go b/vendor/github.com/thanos-io/promql-engine/extlabels/labels.go index aec3e722ab..555eb26bba 100644 --- a/vendor/github.com/thanos-io/promql-engine/extlabels/labels.go +++ b/vendor/github.com/thanos-io/promql-engine/extlabels/labels.go @@ -4,7 +4,6 @@ package extlabels import ( - "github.com/cespare/xxhash/v2" "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" ) @@ -13,29 +12,6 @@ var ( ErrDuplicateLabelSet = errors.New("vector cannot contain metrics with the same labelset") ) -func ContainsDuplicateLabelSetAfterDroppingName(series []labels.Labels) bool { - var ( - buf = make([]byte, 0, 256) - seen = make(map[uint64]struct{}, len(series)) - ) - - b := labels.ScratchBuilder{} - for _, s := range series { - b.Reset() - buf = buf[:0] - - lbls, _ := DropMetricName(s, b) - buf = lbls.Bytes(buf) - - h := xxhash.Sum64(lbls.Bytes(buf)) - if _, ok := seen[h]; ok { - return true - } - seen[h] = struct{}{} - } - return false -} - // DropMetricName removes the __name__ label and returns the dropped name and remaining labels. 
func DropMetricName(l labels.Labels, b labels.ScratchBuilder) (labels.Labels, labels.Label) { return DropLabel(l, labels.MetricName, b) diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go index e1a0de5b69..0ed33db48b 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go @@ -10,7 +10,9 @@ import ( "strings" "time" + "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" @@ -19,6 +21,10 @@ import ( "github.com/thanos-io/promql-engine/query" ) +var ( + RewrittenExternalLabelWarning = errors.Newf("%s: rewriting an external label with label_replace could lead to unpredictable results", annotations.PromQLWarning.Error()) +) + type timeRange struct { start time.Time end time.Time @@ -77,6 +83,8 @@ type RemoteExecution struct { Engine api.RemoteEngine Query string QueryRangeStart time.Time + + valueType parser.ValueType } func (r RemoteExecution) String() string { @@ -90,7 +98,7 @@ func (r RemoteExecution) Pretty(level int) string { return r.String() } func (r RemoteExecution) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } -func (r RemoteExecution) Type() parser.ValueType { return parser.ValueTypeMatrix } +func (r RemoteExecution) Type() parser.ValueType { return r.valueType } func (r RemoteExecution) PromQLExpr() {} @@ -107,7 +115,7 @@ func (r Deduplicate) Pretty(level int) string { return r.String() } func (r Deduplicate) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } -func (r Deduplicate) Type() parser.ValueType { return parser.ValueTypeMatrix } +func (r Deduplicate) Type() parser.ValueType { return r.Expressions[0].Type() } func (r Deduplicate) PromQLExpr() {} @@ -119,7 +127,7 @@ func (r Noop) Pretty(level int) string { return r.String() } func (r Noop) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } -func (r Noop) Type() parser.ValueType { return parser.ValueTypeMatrix } +func (r Noop) Type() parser.ValueType { return parser.ValueTypeVector } func (r Noop) PromQLExpr() {} @@ -129,7 +137,6 @@ var distributiveAggregations = map[parser.ItemType]struct{}{ parser.SUM: {}, parser.MIN: {}, parser.MAX: {}, - parser.AVG: {}, parser.GROUP: {}, parser.COUNT: {}, parser.BOTTOMK: {}, @@ -139,16 +146,18 @@ var distributiveAggregations = map[parser.ItemType]struct{}{ // DistributedExecutionOptimizer produces a logical plan suitable for // distributed Query execution. 
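
Per the distributiveAggregations table above (sum, min, max, group, count, topk, bottomk; avg is removed there and handled by DistributeAvgOptimizer further down), the optimizer only pushes down aggregations that commute with partitioning: aggregating per-engine partial results must equal aggregating the full data set. A toy check of that identity for sum, with hypothetical per-engine sample sets:

    package main

    import "fmt"

    // sum(all samples) == sum(per-engine sums) is the identity that lets the
    // optimizer run the inner aggregation remotely and re-aggregate centrally.
    func sum(xs []float64) (s float64) {
    	for _, x := range xs {
    		s += x
    	}
    	return
    }

    func main() {
    	engineA := []float64{1, 2, 3}
    	engineB := []float64{4, 5}

    	global := sum(append(append([]float64{}, engineA...), engineB...))
    	distributed := sum([]float64{sum(engineA), sum(engineB)})
    	fmt.Println(global, distributed, global == distributed) // 15 15 true
    }
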
type DistributedExecutionOptimizer struct { - Endpoints api.RemoteEndpoints + Endpoints api.RemoteEndpoints + SkipBinaryPushdown bool } -func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr, opts *query.Options) parser.Expr { +func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr, opts *query.Options) (parser.Expr, annotations.Annotations) { engines := m.Endpoints.Engines() sort.Slice(engines, func(i, j int) bool { return engines[i].MinT() < engines[j].MinT() }) labelRanges := make(labelSetRanges) + engineLabels := make(map[string]struct{}) for _, e := range engines { for _, lset := range e.LabelSets() { lsetKey := lset.String() @@ -156,13 +165,25 @@ func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr, opts *query.Op start: time.UnixMilli(e.MinT()), end: time.UnixMilli(e.MaxT()), }) + lset.Range(func(lbl labels.Label) { + engineLabels[lbl.Name] = struct{}{} + }) } } minEngineOverlap := labelRanges.minOverlap() + if rewritesEngineLabels(plan, engineLabels) { + return plan, annotations.New().Add(RewrittenExternalLabelWarning) + } + // TODO(fpetkovski): Consider changing TraverseBottomUp to pass in a list of parents in the transform function. + parents := make(map[*parser.Expr]*parser.Expr) + TraverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { + parents[current] = parent + return false + }) TraverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { // If the current operation is not distributive, stop the traversal. - if !isDistributive(current) { + if !isDistributive(current, m.SkipBinaryPushdown) { return true } @@ -175,7 +196,7 @@ func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr, opts *query.Op } remoteAggregation := newRemoteAggregation(aggr, engines) - subQueries := m.distributeQuery(&remoteAggregation, engines, opts, minEngineOverlap) + subQueries := m.distributeQuery(&remoteAggregation, engines, m.subqueryOpts(parents, current, opts), minEngineOverlap) *current = &parser.AggregateExpr{ Op: localAggregation, Expr: subQueries, @@ -186,17 +207,34 @@ func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr, opts *query.Op } return true } + if isAbsent(*current) { + *current = m.distributeAbsent(*current, engines, calculateStartOffset(current, opts.LookbackDelta), m.subqueryOpts(parents, current, opts)) + return true + } // If the parent operation is distributive, continue the traversal. 
- if isDistributive(parent) { + if isDistributive(parent, m.SkipBinaryPushdown) { return false } - *current = m.distributeQuery(current, engines, opts, minEngineOverlap) + *current = m.distributeQuery(current, engines, m.subqueryOpts(parents, current, opts), minEngineOverlap) return true }) - return plan + return plan, nil +} + +func (m DistributedExecutionOptimizer) subqueryOpts(parents map[*parser.Expr]*parser.Expr, current *parser.Expr, opts *query.Options) *query.Options { + subqueryParents := make([]*parser.SubqueryExpr, 0, len(parents)) + for p := parents[current]; p != nil; p = parents[p] { + if subquery, ok := (*p).(*parser.SubqueryExpr); ok { + subqueryParents = append(subqueryParents, subquery) + } + } + for i := len(subqueryParents) - 1; i >= 0; i-- { + opts = query.NestedOptionsForSubquery(opts, subqueryParents[i]) + } + return opts } func newRemoteAggregation(rootAggregation *parser.AggregateExpr, engines []api.RemoteEngine) parser.Expr { @@ -232,12 +270,11 @@ func newRemoteAggregation(rootAggregation *parser.AggregateExpr, engines []api.R // For each engine which matches the time range of the query, it creates a RemoteExecution scoped to the range of the engine. // All remote executions are wrapped in a Deduplicate logical node to make sure that results from overlapping engines are deduplicated. func (m DistributedExecutionOptimizer) distributeQuery(expr *parser.Expr, engines []api.RemoteEngine, opts *query.Options, allowedStartOffset time.Duration) parser.Expr { - if isAbsent(*expr) { - return m.distributeAbsent(*expr, engines, opts) - } - startOffset := calculateStartOffset(expr, opts.LookbackDelta) - if allowedStartOffset < maxDuration(opts.LookbackDelta, startOffset) { + if allowedStartOffset < startOffset { + return *expr + } + if isConstantExpr(*expr) { return *expr } @@ -253,6 +290,12 @@ func (m DistributedExecutionOptimizer) distributeQuery(expr *parser.Expr, engine if !matchesExternalLabelSet(*expr, e.LabelSets()) { continue } + if e.MinT() > opts.End.UnixMilli() { + continue + } + if e.MaxT() < opts.Start.UnixMilli()-startOffset.Milliseconds() { + continue + } start, keep := getStartTimeForEngine(e, opts, startOffset, globalMinT) if !keep { @@ -263,6 +306,7 @@ func (m DistributedExecutionOptimizer) distributeQuery(expr *parser.Expr, engine Engine: e, Query: (*expr).String(), QueryRangeStart: start, + valueType: (*expr).Type(), }) } @@ -275,15 +319,34 @@ func (m DistributedExecutionOptimizer) distributeQuery(expr *parser.Expr, engine } } -func (m DistributedExecutionOptimizer) distributeAbsent(expr parser.Expr, engines []api.RemoteEngine, opts *query.Options) parser.Expr { +func (m DistributedExecutionOptimizer) distributeAbsent(expr parser.Expr, engines []api.RemoteEngine, startOffset time.Duration, opts *query.Options) parser.Expr { queries := make(RemoteExecutions, 0, len(engines)) - for i := range engines { + for i, e := range engines { + if e.MaxT() < opts.Start.UnixMilli()-startOffset.Milliseconds() { + continue + } + if e.MinT() > opts.End.UnixMilli() { + continue + } queries = append(queries, RemoteExecution{ Engine: engines[i], Query: expr.String(), QueryRangeStart: opts.Start, + valueType: expr.Type(), }) } + // We need to make sure that absent is at least evaluated against one engine. + // Otherwise, we will end up with an empty result (not absent) when no engine matches the query. + // For practicality, we choose the latest one since it likely has data in memory or on disk. 
+	// TODO(fpetkovski): This could also be solved by a synthetic node which acts as a number literal but has specific labels.
+	if len(queries) == 0 && len(engines) > 0 {
+		return RemoteExecution{
+			Engine:          engines[len(engines)-1],
+			Query:           expr.String(),
+			QueryRangeStart: opts.Start,
+			valueType:       expr.Type(),
+		}
+	}
 
 	var rootExpr parser.Expr = queries[0]
 	for i := 1; i < len(queries); i++ {
@@ -369,11 +432,13 @@ func calculateStartOffset(expr *parser.Expr, lookbackDelta time.Duration) time.D
 	var selectRange time.Duration
 	var offset time.Duration
 	traverse(expr, func(node *parser.Expr) {
-		if matrixSelector, ok := (*node).(*parser.MatrixSelector); ok {
-			selectRange = matrixSelector.Range
-		}
-		if vectorSelector, ok := (*node).(*parser.VectorSelector); ok {
-			offset = vectorSelector.Offset
+		switch n := (*node).(type) {
+		case *parser.SubqueryExpr:
+			selectRange += n.Range
+		case *MatrixSelector:
+			selectRange += n.Range
+		case *VectorSelector:
+			offset = n.Offset
 		}
 	})
 	return maxDuration(offset+selectRange, lookbackDelta)
@@ -383,32 +448,42 @@ func numSteps(start, end time.Time, step time.Duration) int64 {
 	return (end.UnixMilli()-start.UnixMilli())/step.Milliseconds() + 1
 }
 
-func isDistributive(expr *parser.Expr) bool {
+func isDistributive(expr *parser.Expr, skipBinaryPushdown bool) bool {
 	if expr == nil {
 		return false
 	}
-	switch aggr := (*expr).(type) {
+	switch e := (*expr).(type) {
 	case *parser.BinaryExpr:
-		// Binary expressions are joins and need to be done across the entire
-		// data set. This is why we cannot push down aggregations where
-		// the operand is a binary expression.
-		// The only exception currently is pushing down binary expressions with a constant operand.
-		lhsConstant := isNumberLiteral(aggr.LHS)
-		rhsConstant := isNumberLiteral(aggr.RHS)
-		return lhsConstant || rhsConstant
+		return isBinaryExpressionWithOneConstantSide(e) || (!skipBinaryPushdown && isBinaryExpressionWithDistributableMatching(e))
 	case *parser.AggregateExpr:
 		// Certain aggregations are currently not supported.
-		if _, ok := distributiveAggregations[aggr.Op]; !ok {
+		if _, ok := distributiveAggregations[e.Op]; !ok {
 			return false
 		}
 	case *parser.Call:
-		return len(aggr.Args) > 0
+		return len(e.Args) > 0
 	}
 
 	return true
 }
 
+func isBinaryExpressionWithOneConstantSide(expr *parser.BinaryExpr) bool {
+	lhsConstant := isConstantExpr(expr.LHS)
+	rhsConstant := isConstantExpr(expr.RHS)
+	return (lhsConstant || rhsConstant)
+}
+
+func isBinaryExpressionWithDistributableMatching(expr *parser.BinaryExpr) bool {
+	if expr.VectorMatching == nil {
+		return false
+	}
+
+	// We can distribute if the vector matching contains the external labels so that
+	// all potential matching partners are contained in one engine.
+	return !expr.VectorMatching.On && len(expr.VectorMatching.MatchingLabels) == 0
+}
+
 // matchesExternalLabels returns false if given matchers are not matching external labels.
func matchesExternalLabelSet(expr parser.Expr, externalLabelSet []labels.Labels) bool { if len(externalLabelSet) == 0 { @@ -416,7 +491,7 @@ func matchesExternalLabelSet(expr parser.Expr, externalLabelSet []labels.Labels) } var selectorSet [][]*labels.Matcher traverse(&expr, func(current *parser.Expr) { - vs, ok := (*current).(*parser.VectorSelector) + vs, ok := (*current).(*VectorSelector) if ok { selectorSet = append(selectorSet, vs.LabelMatchers) } @@ -450,17 +525,43 @@ func matchesExternalLabels(ms []*labels.Matcher, externalLabels labels.Labels) b return true } -func isNumberLiteral(expr parser.Expr) bool { - if _, ok := expr.(*parser.NumberLiteral); ok { +func isConstantExpr(expr parser.Expr) bool { + // TODO: there are more possibilities for constant expressions + switch texpr := expr.(type) { + case *parser.NumberLiteral: return true - } - - stepInvariant, ok := expr.(*parser.StepInvariantExpr) - if !ok { + case *parser.StepInvariantExpr: + return isConstantExpr(texpr.Expr) + case *parser.ParenExpr: + return isConstantExpr(texpr.Expr) + case *parser.Call: + constArgs := true + for _, arg := range texpr.Args { + constArgs = constArgs && isConstantExpr(arg) + } + return constArgs + case *parser.BinaryExpr: + return isConstantExpr(texpr.LHS) && isConstantExpr(texpr.RHS) + default: return false } +} - return isNumberLiteral(stepInvariant.Expr) +func rewritesEngineLabels(e parser.Expr, engineLabels map[string]struct{}) bool { + var result bool + TraverseBottomUp(nil, &e, func(parent *parser.Expr, node *parser.Expr) bool { + call, ok := (*node).(*parser.Call) + if !ok || call.Func.Name != "label_replace" { + return false + } + targetLabel := call.Args[1].(*parser.StringLiteral).Val + if _, ok := engineLabels[targetLabel]; ok { + result = true + return true + } + return false + }) + return result } func maxTime(a, b time.Time) time.Time { diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute_avg.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute_avg.go index 8722b22847..a98f5284ff 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute_avg.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute_avg.go @@ -1,22 +1,26 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + package logicalplan import ( "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/promql-engine/query" ) // DistributeAvgOptimizer rewrites an AVG aggregation into a SUM/COUNT aggregation so that // it can be executed in a distributed manner. -type DistributeAvgOptimizer struct{} +type DistributeAvgOptimizer struct { + SkipBinaryPushdown bool +} -func (r DistributeAvgOptimizer) Optimize(plan parser.Expr, _ *query.Options) parser.Expr { +func (r DistributeAvgOptimizer) Optimize(plan parser.Expr, _ *query.Options) (parser.Expr, annotations.Annotations) { TraverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { - // If the current operation is not distributive, stop the traversal. - if !isDistributive(current) { + if !isDistributiveOrAverage(current, r.SkipBinaryPushdown) { return true } - // If the current node is avg(), distribute the operation and // stop the traversal. 
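// Editor's illustration (approximate rendering, not part of the patch): the rewrite turns
//
//	avg by (job) (rate(http_requests_total[5m]))
//
// into roughly
//
//	sum by (job) (rate(http_requests_total[5m])) / on (job) count by (job) (rate(http_requests_total[5m]))
//
// so each remote engine can ship partial sums and counts that stay safe to merge.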
if aggr, ok := (*current).(*parser.AggregateExpr); ok { @@ -29,16 +33,29 @@ func (r DistributeAvgOptimizer) Optimize(plan parser.Expr, _ *query.Options) par count := *(*current).(*parser.AggregateExpr) count.Op = parser.COUNT *current = &parser.BinaryExpr{ - Op: parser.DIV, - LHS: &sum, - RHS: &count, - VectorMatching: &parser.VectorMatching{}, + Op: parser.DIV, + LHS: &sum, + RHS: &count, + VectorMatching: &parser.VectorMatching{ + Include: aggr.Grouping, + MatchingLabels: aggr.Grouping, + On: true, + }, } return true } - - // If the parent operation is distributive, continue the traversal. - return !isDistributive(parent) + return !isDistributiveOrAverage(parent, r.SkipBinaryPushdown) }) - return plan + return plan, nil +} + +func isDistributiveOrAverage(expr *parser.Expr, skipBinaryPushdown bool) bool { + if expr == nil { + return false + } + var isAvg bool + if aggr, ok := (*expr).(*parser.AggregateExpr); ok { + isAvg = aggr.Op == parser.AVG + } + return isDistributive(expr, skipBinaryPushdown) || isAvg } diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/filter.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/filter.go deleted file mode 100644 index 7d13d8123a..0000000000 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/filter.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) The Thanos Community Authors. -// Licensed under the Apache License 2.0. - -package logicalplan - -import ( - "fmt" - - "github.com/prometheus/prometheus/model/labels" - - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/promql/parser/posrange" -) - -// VectorSelector is vector selector with additional configuration set by optimizers. -// TODO(fpetkovski): Consider replacing all VectorSelector nodes with this one as the first step in the plan. -// This should help us avoid dealing with both types in the entire codebase. -type VectorSelector struct { - *parser.VectorSelector - Filters []*labels.Matcher - BatchSize int64 -} - -func (f VectorSelector) String() string { - if f.BatchSize != 0 && len(f.Filters) != 0 { - return fmt.Sprintf("filter(%s, %s[batch=%d])", f.Filters, f.VectorSelector.String(), f.BatchSize) - } - if f.BatchSize != 0 { - return fmt.Sprintf("%s[batch=%d]", f.VectorSelector.String(), f.BatchSize) - } - return fmt.Sprintf("filter(%s, %s)", f.Filters, f.VectorSelector.String()) -} - -func (f VectorSelector) Pretty(level int) string { return f.String() } - -func (f VectorSelector) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } - -func (f VectorSelector) Type() parser.ValueType { return parser.ValueTypeVector } - -func (f VectorSelector) PromQLExpr() {} diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/merge_selects.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/merge_selects.go index 0dac528251..8865a50d35 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/merge_selects.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/merge_selects.go @@ -5,6 +5,7 @@ package logicalplan import ( "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/promql/parser" @@ -22,17 +23,17 @@ import ( // and apply an additional filter for {c="d"}. 
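// Editor's illustration with a hypothetical query: for
//
//	sum(metric{a="b"}) / sum(metric{a="b", c="d"})
//
// a single Select is issued for metric{a="b"}; the second operand reuses that
// series set and re-applies {c="d"} as an in-memory filter.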
type MergeSelectsOptimizer struct{} -func (m MergeSelectsOptimizer) Optimize(expr parser.Expr, _ *query.Options) parser.Expr { +func (m MergeSelectsOptimizer) Optimize(plan parser.Expr, _ *query.Options) (parser.Expr, annotations.Annotations) { heap := make(matcherHeap) - extractSelectors(heap, expr) - replaceMatchers(heap, &expr) + extractSelectors(heap, plan) + replaceMatchers(heap, &plan) - return expr + return plan, nil } func extractSelectors(selectors matcherHeap, expr parser.Expr) { traverse(&expr, func(node *parser.Expr) { - e, ok := (*node).(*parser.VectorSelector) + e, ok := (*node).(*VectorSelector) if !ok { return } @@ -48,8 +49,6 @@ func replaceMatchers(selectors matcherHeap, expr *parser.Expr) { traverse(expr, func(node *parser.Expr) { var matchers []*labels.Matcher switch e := (*node).(type) { - case *parser.VectorSelector: - matchers = e.LabelMatchers case *VectorSelector: matchers = e.LabelMatchers default: @@ -84,12 +83,6 @@ func replaceMatchers(selectors matcherHeap, expr *parser.Expr) { } switch e := (*node).(type) { - case *parser.VectorSelector: - e.LabelMatchers = replacement - *node = &VectorSelector{ - VectorSelector: e, - Filters: filters, - } case *VectorSelector: e.LabelMatchers = replacement e.Filters = filters diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/passthrough.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/passthrough.go index 5f4b82fc39..1729b2b665 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/passthrough.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/passthrough.go @@ -5,6 +5,7 @@ package logicalplan import ( "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/promql/parser" @@ -43,26 +44,26 @@ func matchingEngineTime(e api.RemoteEngine, opts *query.Options) bool { return !(opts.Start.UnixMilli() > e.MaxT() || opts.End.UnixMilli() < e.MinT()) } -func (m PassthroughOptimizer) Optimize(plan parser.Expr, opts *query.Options) parser.Expr { +func (m PassthroughOptimizer) Optimize(plan parser.Expr, opts *query.Options) (parser.Expr, annotations.Annotations) { engines := m.Endpoints.Engines() if len(engines) == 1 { if !matchingEngineTime(engines[0], opts) { - return plan + return plan, nil } return RemoteExecution{ Engine: engines[0], Query: plan.String(), QueryRangeStart: opts.Start, - } + }, nil } if len(engines) == 0 { - return plan + return plan, nil } matchingLabelsEngines := make([]api.RemoteEngine, 0, len(engines)) TraverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { - if vs, ok := (*current).(*parser.VectorSelector); ok { + if vs, ok := (*current).(*VectorSelector); ok { for _, e := range engines { if !labelSetsMatch(vs.LabelMatchers, e.LabelSets()...) 
{ continue @@ -79,8 +80,8 @@ func (m PassthroughOptimizer) Optimize(plan parser.Expr, opts *query.Options) pa Engine: matchingLabelsEngines[0], Query: plan.String(), QueryRangeStart: opts.Start, - } + }, nil } - return plan + return plan, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/plan.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/plan.go index 381cc49894..3a32b11ee8 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/plan.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/plan.go @@ -6,13 +6,15 @@ package logicalplan import ( "fmt" "math" + "strings" "time" - "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/promql-engine/query" ) @@ -28,12 +30,12 @@ var DefaultOptimizers = []Optimizer{ } type Plan interface { - Optimize([]Optimizer) Plan + Optimize([]Optimizer) (Plan, annotations.Annotations) Expr() parser.Expr } type Optimizer interface { - Optimize(expr parser.Expr, opts *query.Options) parser.Expr + Optimize(plan parser.Expr, opts *query.Options) (parser.Expr, annotations.Annotations) } type plan struct { @@ -49,18 +51,28 @@ func New(expr parser.Expr, opts *query.Options) Plan { // the engine handles sorting at the presentation layer expr = trimSorts(expr) + // replace scanners by our logical nodes + expr = replaceSelectors(expr) + return &plan{ expr: expr, opts: opts, } } -func (p *plan) Optimize(optimizers []Optimizer) Plan { +func (p *plan) Optimize(optimizers []Optimizer) (Plan, annotations.Annotations) { + annos := annotations.New() for _, o := range optimizers { - p.expr = o.Optimize(p.expr, p.opts) + var a annotations.Annotations + p.expr, a = o.Optimize(p.expr, p.opts) + annos.Merge(a) } + // parens are just annoying and getting rid of them doesnt change the query + expr := trimParens(p.expr) - return &plan{expr: p.expr, opts: p.opts} + expr = insertDuplicateLabelChecks(expr) + + return &plan{expr: expr, opts: p.opts}, *annos } func (p *plan) Expr() parser.Expr { @@ -70,6 +82,7 @@ func (p *plan) Expr() parser.Expr { func traverse(expr *parser.Expr, transform func(*parser.Expr)) { switch node := (*expr).(type) { case *parser.StepInvariantExpr: + transform(expr) traverse(&node.Expr, transform) case *parser.VectorSelector: transform(expr) @@ -77,13 +90,19 @@ func traverse(expr *parser.Expr, transform func(*parser.Expr)) { var x parser.Expr = node.VectorSelector transform(expr) traverse(&x, transform) + case *MatrixSelector: + var x parser.Expr = node.MatrixSelector + transform(expr) + traverse(&x, transform) case *parser.MatrixSelector: transform(expr) traverse(&node.VectorSelector, transform) case *parser.AggregateExpr: transform(expr) + traverse(&node.Param, transform) traverse(&node.Expr, transform) case *parser.Call: + transform(expr) for i := range node.Args { traverse(&(node.Args[i]), transform) } @@ -92,20 +111,31 @@ func traverse(expr *parser.Expr, transform func(*parser.Expr)) { traverse(&node.LHS, transform) traverse(&node.RHS, transform) case *parser.UnaryExpr: + transform(expr) traverse(&node.Expr, transform) case *parser.ParenExpr: + transform(expr) traverse(&node.Expr, transform) case *parser.SubqueryExpr: + transform(expr) + traverse(&node.Expr, transform) + case CheckDuplicateLabels: 
+		transform(expr)
 		traverse(&node.Expr, transform)
 	}
 }
 
 func TraverseBottomUp(parent *parser.Expr, current *parser.Expr, transform func(parent *parser.Expr, node *parser.Expr) bool) bool {
 	switch node := (*current).(type) {
+	case *parser.StringLiteral:
+		return false
 	case *parser.NumberLiteral:
 		return false
 	case *parser.StepInvariantExpr:
-		return TraverseBottomUp(current, &node.Expr, transform)
+		if stop := TraverseBottomUp(current, &node.Expr, transform); stop {
+			return stop
+		}
+		return transform(parent, current)
 	case *parser.VectorSelector:
 		return transform(parent, current)
 	case *VectorSelector:
@@ -114,6 +144,12 @@ func TraverseBottomUp(parent *parser.Expr, current *parser.Expr, transform func(
 		}
 		var x parser.Expr = node.VectorSelector
 		return TraverseBottomUp(current, &x, transform)
+	case *MatrixSelector:
+		if stop := transform(parent, current); stop {
+			return stop
+		}
+		var x parser.Expr = node.MatrixSelector
+		return TraverseBottomUp(current, &x, transform)
 	case *parser.MatrixSelector:
 		return transform(current, &node.VectorSelector)
 	case *parser.AggregateExpr:
@@ -143,13 +179,71 @@
 		}
 		return transform(parent, current)
 	case *parser.SubqueryExpr:
-		return TraverseBottomUp(current, &node.Expr, transform)
+		if stop := TraverseBottomUp(current, &node.Expr, transform); stop {
+			return stop
+		}
+		return transform(parent, current)
+	case CheckDuplicateLabels:
+		if stop := TraverseBottomUp(current, &node.Expr, transform); stop {
+			return stop
+		}
+		return transform(parent, current)
 	}
-
 	return true
 }
 
+func replaceSelectors(plan parser.Expr) parser.Expr {
+	traverse(&plan, func(current *parser.Expr) {
+		switch t := (*current).(type) {
+		case *parser.MatrixSelector:
+			*current = &MatrixSelector{MatrixSelector: t, OriginalString: t.String()}
+		case *parser.VectorSelector:
+			*current = &VectorSelector{VectorSelector: t}
+		case *parser.Call:
+			if t.Func.Name != "timestamp" {
+				return
+			}
+			switch v := unwrapParens(t.Args[0]).(type) {
+			case *parser.VectorSelector:
+				*current = &VectorSelector{VectorSelector: v, SelectTimestamp: true}
+			case *parser.StepInvariantExpr:
+				vs, ok := unwrapParens(v.Expr).(*parser.VectorSelector)
+				if ok {
+					// Prometheus weirdness: a selector with a fixed @ timestamp has its offset cleared here.
+					if vs.Timestamp != nil {
+						vs.OriginalOffset = 0
+					}
+					*current = &VectorSelector{VectorSelector: vs, SelectTimestamp: true}
+				}
+			}
+		}
+	})
+	return plan
+}
+
 func trimSorts(expr parser.Expr) parser.Expr {
+	canTrimSorts := true
+	// We cannot trim an inner sort if it's an argument to a timestamp function.
+	// Doing so would transform "timestamp(sort(X))" into "timestamp(X)",
+	// which might return actual sample timestamps instead of the query execution timestamp.
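// Editor's note: with this guard, a query containing timestamp(sort(up)) keeps
// its sort call intact, while a query with a plain sort(sum(up)) and no such
// timestamp wrapper still has its sorts trimmed by the pass below.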
+ TraverseBottomUp(nil, &expr, func(parent, current *parser.Expr) bool { + if current == nil || parent == nil { + return true + } + e, pok := (*parent).(*parser.Call) + f, cok := (*current).(*parser.Call) + + if pok && cok { + if e.Func.Name == "timestamp" && strings.HasPrefix(f.Func.Name, "sort") { + canTrimSorts = false + return true + } + } + return false + }) + if !canTrimSorts { + return expr + } TraverseBottomUp(nil, &expr, func(parent, current *parser.Expr) bool { if current == nil || parent == nil { return true @@ -166,6 +260,34 @@ func trimSorts(expr parser.Expr) parser.Expr { return expr } +func trimParens(expr parser.Expr) parser.Expr { + TraverseBottomUp(nil, &expr, func(parent, current *parser.Expr) bool { + if current == nil || parent == nil { + return true + } + switch (*parent).(type) { + case *parser.ParenExpr: + *parent = *current + } + return false + }) + return expr +} + +func insertDuplicateLabelChecks(expr parser.Expr) parser.Expr { + traverse(&expr, func(node *parser.Expr) { + switch t := (*node).(type) { + case *parser.AggregateExpr, *parser.UnaryExpr, *parser.BinaryExpr, *parser.Call: + *node = CheckDuplicateLabels{Expr: t} + case *VectorSelector: + if t.SelectTimestamp { + *node = CheckDuplicateLabels{Expr: t} + } + } + }) + return expr +} + // preprocessExpr wraps all possible step invariant parts of the given expression with // StepInvariantExpr. It also resolves the preprocessors. // Copied from Prometheus and adjusted to work with the vendored parser: @@ -278,6 +400,15 @@ func newStepInvariantExpr(expr parser.Expr) parser.Expr { return &parser.StepInvariantExpr{Expr: expr} } +func unwrapParens(expr parser.Expr) parser.Expr { + switch t := expr.(type) { + case *parser.ParenExpr: + return unwrapParens(t.Expr) + default: + return t + } +} + // Copy from https://github.com/prometheus/prometheus/blob/v2.39.1/promql/engine.go#L2658. func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { getOffset := func(ts *int64, originalOffset time.Duration, path []parser.Node) time.Duration { @@ -341,14 +472,76 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { } func setOffsetForInnerSubqueries(expr parser.Expr, opts *query.Options) { - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { - switch n := node.(type) { - case *parser.SubqueryExpr: - nOpts := query.NestedOptionsForSubquery(opts, n) - setOffsetForAtModifier(nOpts.Start.UnixMilli(), n.Expr) - setOffsetForInnerSubqueries(n.Expr, nOpts) - return errors.New("stop iteration") + switch n := expr.(type) { + case *parser.SubqueryExpr: + nOpts := query.NestedOptionsForSubquery(opts, n) + setOffsetForAtModifier(nOpts.Start.UnixMilli(), n.Expr) + setOffsetForInnerSubqueries(n.Expr, nOpts) + default: + for _, c := range parser.Children(n) { + setOffsetForInnerSubqueries(c.(parser.Expr), opts) } - return nil - }) + } } + +// VectorSelector is vector selector with additional configuration set by optimizers. +type VectorSelector struct { + *parser.VectorSelector + Filters []*labels.Matcher + BatchSize int64 + SelectTimestamp bool +} + +func (f VectorSelector) String() string { + if f.SelectTimestamp { + // If we pushed down timestamp into the vector selector we need to render the proper + // PromQL again. 
+ return fmt.Sprintf("timestamp(%s)", f.VectorSelector.String()) + } + return f.VectorSelector.String() +} + +func (f VectorSelector) Pretty(level int) string { return f.String() } + +func (f VectorSelector) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } + +func (f VectorSelector) Type() parser.ValueType { return parser.ValueTypeVector } + +func (f VectorSelector) PromQLExpr() {} + +// MatrixSelector is matrix selector with additional configuration set by optimizers. +// It is used so we can get rid of VectorSelector in distributed mode too. +type MatrixSelector struct { + *parser.MatrixSelector + + // Needed because this operator is used in the distributed mode + OriginalString string +} + +func (f MatrixSelector) String() string { + return f.OriginalString +} + +func (f MatrixSelector) Pretty(level int) string { return f.String() } + +func (f MatrixSelector) PositionRange() posrange.PositionRange { return posrange.PositionRange{} } + +func (f MatrixSelector) Type() parser.ValueType { return parser.ValueTypeVector } + +func (f MatrixSelector) PromQLExpr() {} + +type CheckDuplicateLabels struct { + Expr parser.Expr +} + +func (c CheckDuplicateLabels) String() string { + return c.Expr.String() +} + +func (c CheckDuplicateLabels) Pretty(level int) string { return c.Expr.Pretty(level) } + +func (c CheckDuplicateLabels) PositionRange() posrange.PositionRange { return c.Expr.PositionRange() } + +func (c CheckDuplicateLabels) Type() parser.ValueType { return c.Expr.Type() } + +func (c CheckDuplicateLabels) PromQLExpr() {} diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/propagate_selectors.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/propagate_selectors.go index 1401cc93ba..1aa400dee4 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/propagate_selectors.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/propagate_selectors.go @@ -7,6 +7,7 @@ import ( "sort" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/promql/parser" @@ -17,8 +18,8 @@ import ( // two vector selectors in a binary expression. 
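// Editor's illustration (hypothetical metrics): in
//
//	node_load1{instance="a"} / node_load15
//
// the matcher {instance="a"} is copied onto the right-hand selector so both
// sides fetch the same, smaller set of series.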
type PropagateMatchersOptimizer struct{} -func (m PropagateMatchersOptimizer) Optimize(expr parser.Expr, _ *query.Options) parser.Expr { - traverse(&expr, func(expr *parser.Expr) { +func (m PropagateMatchersOptimizer) Optimize(plan parser.Expr, _ *query.Options) (parser.Expr, annotations.Annotations) { + traverse(&plan, func(expr *parser.Expr) { binOp, ok := (*expr).(*parser.BinaryExpr) if !ok { return @@ -42,15 +43,15 @@ func (m PropagateMatchersOptimizer) Optimize(expr parser.Expr, _ *query.Options) propagateMatchers(binOp) }) - return expr + return plan, nil } func propagateMatchers(binOp *parser.BinaryExpr) { - lhSelector, ok := binOp.LHS.(*parser.VectorSelector) + lhSelector, ok := binOp.LHS.(*VectorSelector) if !ok { return } - rhSelector, ok := binOp.RHS.(*parser.VectorSelector) + rhSelector, ok := binOp.RHS.(*VectorSelector) if !ok { return } @@ -105,7 +106,7 @@ func makeUnion(lhMatchers map[string]*labels.Matcher, rhMatchers map[string]*lab return union, false } -func toMatcherMap(lhSelector *parser.VectorSelector) map[string]*labels.Matcher { +func toMatcherMap(lhSelector *VectorSelector) map[string]*labels.Matcher { lhMatchers := make(map[string]*labels.Matcher) for _, m := range lhSelector.LabelMatchers { lhMatchers[m.Name] = m diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/set_batch_size.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/set_batch_size.go index 0ba261179d..0610396383 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/set_batch_size.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/set_batch_size.go @@ -5,6 +5,7 @@ package logicalplan import ( "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/promql-engine/query" ) @@ -19,12 +20,15 @@ type SelectorBatchSize struct { // If any aggregate is present in the plan, the batch size is set to the configured value. // The two exceptions where this cannot be done is if the aggregate is quantile, or // when a binary expression precedes the aggregate. -func (m SelectorBatchSize) Optimize(expr parser.Expr, _ *query.Options) parser.Expr { +func (m SelectorBatchSize) Optimize(plan parser.Expr, _ *query.Options) (parser.Expr, annotations.Annotations) { canBatch := false - traverse(&expr, func(current *parser.Expr) { + traverse(&plan, func(current *parser.Expr) { switch e := (*current).(type) { case *parser.Call: - canBatch = e.Func.Name == "histogram_quantile" + //TODO: calls can reduce the labelset of the input; think histogram_quantile reducing + // multiple "le" labels into one output. We cannot handle this in batching. Revisit + // what is safe here. 
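// Editor's note: a call such as histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[5m]))
// folds the per-"le" bucket series into a single output series, so slicing its
// input into fixed-size selector batches could split a bucket family across
// batches and skew the result; batching is therefore disabled below any call.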
+ canBatch = false case *parser.BinaryExpr: canBatch = false case *parser.AggregateExpr: @@ -38,12 +42,7 @@ func (m SelectorBatchSize) Optimize(expr parser.Expr, _ *query.Options) parser.E e.BatchSize = m.Size } canBatch = false - case *parser.VectorSelector: - if canBatch { - *current = &VectorSelector{VectorSelector: e, BatchSize: m.Size} - } - canBatch = false } }) - return expr + return plan, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/sort_matchers.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/sort_matchers.go index 3f6371c063..bdaa1c584f 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/sort_matchers.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/sort_matchers.go @@ -7,6 +7,7 @@ import ( "sort" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/promql-engine/query" ) @@ -16,9 +17,9 @@ import ( // can rely on this property. type SortMatchers struct{} -func (m SortMatchers) Optimize(expr parser.Expr, _ *query.Options) parser.Expr { - traverse(&expr, func(node *parser.Expr) { - e, ok := (*node).(*parser.VectorSelector) +func (m SortMatchers) Optimize(plan parser.Expr, _ *query.Options) (parser.Expr, annotations.Annotations) { + traverse(&plan, func(node *parser.Expr) { + e, ok := (*node).(*VectorSelector) if !ok { return } @@ -27,5 +28,5 @@ func (m SortMatchers) Optimize(expr parser.Expr, _ *query.Options) parser.Expr { return e.LabelMatchers[i].Name < e.LabelMatchers[j].Name }) }) - return expr + return plan, nil } diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go index 5fb8c3cad8..d33441dc22 100644 --- a/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go +++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + package logicalplan import ( diff --git a/vendor/github.com/thanos-io/promql-engine/query/options.go b/vendor/github.com/thanos-io/promql-engine/query/options.go index 50ebce1f31..3381c054d0 100644 --- a/vendor/github.com/thanos-io/promql-engine/query/options.go +++ b/vendor/github.com/thanos-io/promql-engine/query/options.go @@ -19,7 +19,6 @@ type Options struct { LookbackDelta time.Duration ExtLookbackDelta time.Duration NoStepSubqueryIntervalFn func(time.Duration) time.Duration - EnableSubqueries bool EnableAnalysis bool } @@ -54,7 +53,7 @@ func NestedOptionsForSubquery(opts *Options, t *parser.SubqueryExpr) *Options { StepsBatch: opts.StepsBatch, ExtLookbackDelta: opts.ExtLookbackDelta, NoStepSubqueryIntervalFn: opts.NoStepSubqueryIntervalFn, - EnableSubqueries: opts.EnableSubqueries, + EnableAnalysis: opts.EnableAnalysis, } if t.Step != 0 { nOpts.Step = t.Step diff --git a/vendor/github.com/thanos-io/promql-engine/ringbuffer/ringbuffer.go b/vendor/github.com/thanos-io/promql-engine/ringbuffer/ringbuffer.go new file mode 100644 index 0000000000..337af66980 --- /dev/null +++ b/vendor/github.com/thanos-io/promql-engine/ringbuffer/ringbuffer.go @@ -0,0 +1,76 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package ringbuffer + +type Sample[T any] struct { + T int64 + V T +} + +type RingBuffer[T any] struct { + items []Sample[T] + tail []Sample[T] +} + +func New[T any](size int) *RingBuffer[T] { + return &RingBuffer[T]{ + items: make([]Sample[T], 0, size), + } +} + +func (r *RingBuffer[T]) Len() int { + return len(r.items) +} + +func (r *RingBuffer[T]) MaxT() int64 { + return r.items[len(r.items)-1].T +} + +func (r *RingBuffer[T]) ReadIntoNext(f func(*Sample[T])) { + n := len(r.items) + if cap(r.items) > len(r.items) { + r.items = r.items[:n+1] + } else { + r.items = append(r.items, Sample[T]{}) + } + f(&r.items[n]) +} + +func (r *RingBuffer[T]) Push(t int64, v T) { + if n := len(r.items); n < cap(r.items) { + r.items = r.items[:n+1] + r.items[n].T = t + r.items[n].V = v + } else { + r.items = append(r.items, Sample[T]{T: t, V: v}) + } +} + +func (r *RingBuffer[T]) DropBefore(ts int64) { + if len(r.items) == 0 || r.items[len(r.items)-1].T < ts { + r.items = r.items[:0] + return + } + var drop int + for drop = 0; drop < len(r.items) && r.items[drop].T < ts; drop++ { + } + keep := len(r.items) - drop + + r.tail = resize(r.tail, drop) + copy(r.tail, r.items[:drop]) + copy(r.items, r.items[drop:]) + copy(r.items[keep:], r.tail) + r.items = r.items[:keep] +} + +func (r *RingBuffer[T]) Samples() []Sample[T] { + return r.items +} + +func resize[T any](s []Sample[T], n int) []Sample[T] { + if cap(s) >= n { + return s[:n] + } + return make([]Sample[T], n) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go index d7e589c724..2b36bf8025 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -83,6 +83,9 @@ type LazyBinaryReader struct { // Keep track of the last time it was used. usedAt *atomic.Int64 + + // If true, index header will be downloaded at query time rather than initialization time. + lazyDownload bool } // NewLazyBinaryReader makes a new LazyBinaryReader. If the index-header does not exist @@ -99,8 +102,9 @@ func NewLazyBinaryReader( metrics *LazyBinaryReaderMetrics, binaryReaderMetrics *BinaryReaderMetrics, onClosed func(*LazyBinaryReader), + lazyDownload bool, ) (*LazyBinaryReader, error) { - if dir != "" { + if dir != "" && !lazyDownload { indexHeaderFile := filepath.Join(dir, id.String(), block.IndexHeaderFilename) // If the index-header doesn't exist we should download it. if _, err := os.Stat(indexHeaderFile); err != nil { @@ -131,6 +135,7 @@ func NewLazyBinaryReader( binaryReaderMetrics: binaryReaderMetrics, usedAt: atomic.NewInt64(time.Now().UnixNano()), onClosed: onClosed, + lazyDownload: lazyDownload, }, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index fc8cb26813..e9fe5eb7dc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -14,6 +14,8 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" + + "github.com/thanos-io/thanos/pkg/block/metadata" ) // ReaderPoolMetrics holds metrics tracked by ReaderPool. @@ -46,10 +48,47 @@ type ReaderPool struct { // Keep track of all readers managed by the pool. 
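Before the reader-pool changes continue below, a minimal usage sketch for the new ringbuffer package above (editor's addition; the sample timestamps and values are made up, timestamps are in milliseconds):

	package main

	import (
		"fmt"

		"github.com/thanos-io/promql-engine/ringbuffer"
	)

	func main() {
		buf := ringbuffer.New[float64](4)
		for ts := int64(10); ts <= 50; ts += 10 {
			buf.Push(ts, float64(ts)/10) // samples at t=10..50
		}
		// Keep only samples inside a 30ms window ending at t=50.
		buf.DropBefore(50 - 30 + 1)
		fmt.Println(buf.Len(), buf.MaxT()) // 3 50
		for _, s := range buf.Samples() {
			fmt.Printf("t=%d v=%.1f\n", s.T, s.V) // 30/3.0, 40/4.0, 50/5.0
		}
	}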
 	lazyReadersMx sync.Mutex
 	lazyReaders   map[*LazyBinaryReader]struct{}
+
+	lazyDownloadFunc LazyDownloadIndexHeaderFunc
+}
+
+// IndexHeaderLazyDownloadStrategy specifies how to download index headers
+// lazily. Only used when lazy mmap is enabled.
+type IndexHeaderLazyDownloadStrategy string
+
+const (
+	// EagerDownloadStrategy always disables lazy downloading of index headers.
+	EagerDownloadStrategy IndexHeaderLazyDownloadStrategy = "eager"
+	// LazyDownloadStrategy always lazily downloads index headers.
+	LazyDownloadStrategy IndexHeaderLazyDownloadStrategy = "lazy"
+)
+
+func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIndexHeaderFunc {
+	switch s {
+	case LazyDownloadStrategy:
+		return AlwaysLazyDownloadIndexHeader
+	default:
+		// Always fall back to eagerly downloading the index header.
+		return AlwaysEagerDownloadIndexHeader
+	}
+}
+
+// LazyDownloadIndexHeaderFunc is used to determine whether to download the index header lazily
+// or not by checking its block metadata. Use cases include deciding by block time range or by index file size.
+type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool
+
+// AlwaysEagerDownloadIndexHeader always eagerly downloads the index header.
+func AlwaysEagerDownloadIndexHeader(meta *metadata.Meta) bool {
+	return false
+}
+
+// AlwaysLazyDownloadIndexHeader always lazily downloads the index header.
+func AlwaysLazyDownloadIndexHeader(meta *metadata.Meta) bool {
+	return true
+}
 
 // NewReaderPool makes a new ReaderPool.
-func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, metrics *ReaderPoolMetrics) *ReaderPool {
+func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, metrics *ReaderPoolMetrics, lazyDownloadFunc LazyDownloadIndexHeaderFunc) *ReaderPool {
 	p := &ReaderPool{
 		logger:  logger,
 		metrics: metrics,
@@ -57,6 +96,7 @@ func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTime
 		lazyReaderIdleTimeout: lazyReaderIdleTimeout,
 		lazyReaders:           make(map[*LazyBinaryReader]struct{}),
 		close:                 make(chan struct{}),
+		lazyDownloadFunc:      lazyDownloadFunc,
 	}
 
 	// Start a goroutine to close idle readers (only if required).
@@ -81,12 +121,12 @@ func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTime
 // NewBinaryReader creates and returns a new binary reader. If the pool has been configured
 // with lazy reader enabled, this function will return a lazy reader. The returned lazy reader
 // is tracked by the pool and automatically closed once the idle timeout expires.
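The metadata hook above also admits custom policies beyond the two built-in strategies. A sketch (editor's assumption, not part of this patch) that lazily downloads index headers only for blocks whose newest data is older than a cutoff:

	package main

	import (
		"time"

		"github.com/thanos-io/thanos/pkg/block/indexheader"
		"github.com/thanos-io/thanos/pkg/block/metadata"
	)

	// lazyForOldBlocks marks blocks whose newest sample is older than maxAge,
	// so only rarely queried history pays the first-query download cost.
	// The function name and cutoff policy are illustrative.
	func lazyForOldBlocks(maxAge time.Duration) indexheader.LazyDownloadIndexHeaderFunc {
		cutoff := time.Now().Add(-maxAge).UnixMilli()
		return func(meta *metadata.Meta) bool {
			return meta.MaxTime < cutoff
		}
	}

	func main() {
		_ = lazyForOldBlocks(14 * 24 * time.Hour)
	}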
-func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (Reader, error) { +func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int, meta *metadata.Meta) (Reader, error) { var reader Reader var err error if p.lazyReaderEnabled { - reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed) + reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta)) } else { reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.binaryReader) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go index a390ff7ae3..a479ee242d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -41,6 +41,7 @@ const ( RulerSource SourceType = "ruler" BucketRepairSource SourceType = "bucket.repair" BucketRewriteSource SourceType = "bucket.rewrite" + BucketUploadSource SourceType = "bucket.upload" TestSource SourceType = "test" ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go new file mode 100644 index 0000000000..9de1b4f580 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go @@ -0,0 +1,99 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package clientconfig is a wrapper around github.com/prometheus/common/config with additional +// support for gRPC clients. +package clientconfig + +import ( + "fmt" + "net/url" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +// Config is a structure that allows pointing to various HTTP and GRPC endpoints, e.g. ruler connecting to queriers. +type Config struct { + HTTPConfig HTTPConfig `yaml:",inline"` + GRPCConfig *GRPCConfig `yaml:"grpc_config"` +} + +func DefaultConfig() Config { + return Config{ + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + Scheme: "http", + StaticAddresses: []string{}, + FileSDConfigs: []HTTPFileSDConfig{}, + }, + }, + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{}, + }, + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultConfig() + type plain Config + return unmarshal((*plain)(c)) +} + +// LoadConfigs loads a list of Config from YAML data. +func LoadConfigs(confYAML []byte) ([]Config, error) { + var clientCfg []Config + if err := yaml.UnmarshalStrict(confYAML, &clientCfg); err != nil { + return nil, err + } + return clientCfg, nil +} + +// BuildConfigFromHTTPAddresses returns a configuration from static addresses. +func BuildConfigFromHTTPAddresses(addrs []string) ([]Config, error) { + configs := make([]Config, 0, len(addrs)) + for i, addr := range addrs { + if addr == "" { + return nil, errors.Errorf("static address cannot be empty at index %d", i) + } + // If addr is missing schema, add http. 
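// Editor's note: e.g. "alertmanager:9093" is normalized to "http://alertmanager:9093" before parsing.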
+		if !strings.Contains(addr, "://") {
+			addr = fmt.Sprintf("http://%s", addr)
+		}
+		u, err := url.Parse(addr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse addr %q", addr)
+		}
+		if u.Scheme != "http" && u.Scheme != "https" {
+			return nil, errors.Errorf("%q is not a supported scheme for address", u.Scheme)
+		}
+		configs = append(configs, Config{
+			HTTPConfig: HTTPConfig{
+				EndpointsConfig: HTTPEndpointsConfig{
+					Scheme:          u.Scheme,
+					StaticAddresses: []string{u.Host},
+					PathPrefix:      u.Path,
+				},
+			},
+		})
+	}
+	return configs, nil
+}
+
+// BuildConfigFromGRPCAddresses returns a configuration from static addresses.
+func BuildConfigFromGRPCAddresses(addrs []string) ([]Config, error) {
+	configs := make([]Config, 0, len(addrs))
+	for i, addr := range addrs {
+		if addr == "" {
+			return nil, errors.Errorf("static address cannot be empty at index %d", i)
+		}
+		configs = append(configs, Config{
+			GRPCConfig: &GRPCConfig{
+				EndpointAddrs: []string{addr},
+			},
+		})
+	}
+	return configs, nil
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/grpc.go
new file mode 100644
index 0000000000..e0987b6c8a
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/grpc.go
@@ -0,0 +1,8 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package clientconfig
+
+type GRPCConfig struct {
+	EndpointAddrs []string `yaml:"endpoint_addresses"`
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go
similarity index 85%
rename from vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go
rename to vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go
index 4b71a03fda..b99dd9ef8f 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go
@@ -1,8 +1,7 @@
 // Copyright (c) The Thanos Authors.
 // Licensed under the Apache License 2.0.
 
-// Package httpconfig is a wrapper around github.com/prometheus/common/config.
-package httpconfig
+package clientconfig
 
 import (
 	"context"
@@ -15,6 +14,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http"
 
 	"github.com/go-kit/log"
@@ -30,8 +31,18 @@ import (
 	"github.com/thanos-io/thanos/pkg/discovery/cache"
 )
 
-// ClientConfig configures an HTTP client.
-type ClientConfig struct {
+// HTTPConfig is a structure that allows pointing to various HTTP endpoints, e.g. a ruler connecting to queriers.
+type HTTPConfig struct {
+	HTTPClientConfig HTTPClientConfig    `yaml:"http_config"`
+	EndpointsConfig  HTTPEndpointsConfig `yaml:",inline"`
+}
+
+func (c *HTTPConfig) NotEmpty() bool {
+	return len(c.EndpointsConfig.FileSDConfigs) > 0 || len(c.EndpointsConfig.StaticAddresses) > 0
+}
+
+// HTTPClientConfig configures an HTTP client.
+type HTTPClientConfig struct {
 	// The HTTP basic authentication credentials for the targets.
 	BasicAuth BasicAuth `yaml:"basic_auth"`
 	// The bearer token for the targets.
@@ -75,7 +86,7 @@ func (b BasicAuth) IsZero() bool {
 	return b.Username == "" && b.Password == "" && b.PasswordFile == ""
 }
 
-// Transport configures client's transport properties.
+// TransportConfig configures client's transport properties.
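A usage sketch for the BuildConfigFrom*Addresses helpers above (editor's addition; the endpoint names are made up):

	package main

	import (
		"fmt"
		"log"

		"github.com/thanos-io/thanos/pkg/clientconfig"
	)

	func main() {
		cfgs, err := clientconfig.BuildConfigFromHTTPAddresses([]string{
			"query-0:10902",         // scheme defaults to http
			"https://query-1:10902", // explicit https is kept
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range cfgs {
			fmt.Println(c.HTTPConfig.EndpointsConfig.Scheme, c.HTTPConfig.EndpointsConfig.StaticAddresses)
		}
	}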
type TransportConfig struct { MaxIdleConns int `yaml:"max_idle_conns"` MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` @@ -100,12 +111,12 @@ var defaultTransportConfig TransportConfig = TransportConfig{ DialerTimeout: int64(5 * time.Second), } -func NewDefaultClientConfig() ClientConfig { - return ClientConfig{TransportConfig: defaultTransportConfig} +func NewDefaultHTTPClientConfig() HTTPClientConfig { + return HTTPClientConfig{TransportConfig: defaultTransportConfig} } -func NewClientConfigFromYAML(cfg []byte) (*ClientConfig, error) { - conf := &ClientConfig{TransportConfig: defaultTransportConfig} +func NewHTTPClientConfigFromYAML(cfg []byte) (*HTTPClientConfig, error) { + conf := &HTTPClientConfig{TransportConfig: defaultTransportConfig} if err := yaml.Unmarshal(cfg, conf); err != nil { return nil, err } @@ -188,7 +199,7 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig } // NewHTTPClient returns a new HTTP client. -func NewHTTPClient(cfg ClientConfig, name string) (*http.Client, error) { +func NewHTTPClient(cfg HTTPClientConfig, name string) (*http.Client, error) { httpClientConfig := config_util.HTTPClientConfig{ BearerToken: config_util.Secret(cfg.BearerToken), BearerTokenFile: cfg.BearerTokenFile, @@ -272,13 +283,13 @@ func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error return u.rt.RoundTrip(r) } -// EndpointsConfig configures a cluster of HTTP endpoints from static addresses and +// HTTPEndpointsConfig configures a cluster of HTTP endpoints from static addresses and // file service discovery. -type EndpointsConfig struct { +type HTTPEndpointsConfig struct { // List of addresses with DNS prefixes. StaticAddresses []string `yaml:"static_configs"` // List of file configurations (our FileSD supports different DNS lookups). - FileSDConfigs []FileSDConfig `yaml:"file_sd_configs"` + FileSDConfigs []HTTPFileSDConfig `yaml:"file_sd_configs"` // The URL scheme to use when talking to targets. Scheme string `yaml:"scheme"` @@ -287,13 +298,13 @@ type EndpointsConfig struct { PathPrefix string `yaml:"path_prefix"` } -// FileSDConfig represents a file service discovery configuration. -type FileSDConfig struct { +// HTTPFileSDConfig represents a file service discovery configuration. +type HTTPFileSDConfig struct { Files []string `yaml:"files"` RefreshInterval model.Duration `yaml:"refresh_interval"` } -func (c FileSDConfig) convert() (file.SDConfig, error) { +func (c HTTPFileSDConfig) convert() (file.SDConfig, error) { var fileSDConfig file.SDConfig b, err := yaml.Marshal(c) if err != nil { @@ -308,8 +319,8 @@ type AddressProvider interface { Addresses() []string } -// Client represents a client that can send requests to a cluster of HTTP-based endpoints. -type Client struct { +// HTTPClient represents a client that can send requests to a cluster of HTTP-based endpoints. +type HTTPClient struct { logger log.Logger httpClient *http.Client @@ -324,7 +335,7 @@ type Client struct { } // NewClient returns a new Client. 
-func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, provider AddressProvider) (*Client, error) { +func NewClient(logger log.Logger, cfg HTTPEndpointsConfig, client *http.Client, provider AddressProvider) (*HTTPClient, error) { if logger == nil { logger = log.NewNopLogger() } @@ -335,9 +346,14 @@ func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, prov if err != nil { return nil, err } - discoverers = append(discoverers, file.NewDiscovery(&fileSDCfg, logger)) + // We provide an empty registry and ignore metrics for now. + discovery, err := file.NewDiscovery(&fileSDCfg, logger, prometheus.NewRegistry()) + if err != nil { + return nil, err + } + discoverers = append(discoverers, discovery) } - return &Client{ + return &HTTPClient{ logger: logger, httpClient: client, scheme: cfg.Scheme, @@ -350,12 +366,12 @@ func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, prov } // Do executes an HTTP request with the underlying HTTP client. -func (c *Client) Do(req *http.Request) (*http.Response, error) { +func (c *HTTPClient) Do(req *http.Request) (*http.Response, error) { return c.httpClient.Do(req) } // Endpoints returns the list of known endpoints. -func (c *Client) Endpoints() []*url.URL { +func (c *HTTPClient) Endpoints() []*url.URL { var urls []*url.URL for _, addr := range c.provider.Addresses() { urls = append(urls, @@ -370,7 +386,7 @@ func (c *Client) Endpoints() []*url.URL { } // Discover runs the service to discover endpoints until the given context is done. -func (c *Client) Discover(ctx context.Context) { +func (c *HTTPClient) Discover(ctx context.Context) { var wg sync.WaitGroup ch := make(chan []*targetgroup.Group) @@ -400,6 +416,6 @@ func (c *Client) Discover(ctx context.Context) { } // Resolve refreshes and resolves the list of targets. -func (c *Client) Resolve(ctx context.Context) error { +func (c *HTTPClient) Resolve(ctx context.Context) error { return c.provider.Resolve(ctx, append(c.fileSDCache.Addresses(), c.staticAddresses...)) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index ae0d48bffd..edf822434c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -941,10 +941,14 @@ func (e HaltError) Error() string { return e.err.Error() } +func (e HaltError) Unwrap() error { + return errors.Cause(e.err) +} + // IsHaltError returns true if the base error is a HaltError. // If a multierror is passed, any halt error will return true. func IsHaltError(err error) bool { - if multiErr, ok := errors.Cause(err).(errutil.NonNilMultiError); ok { + if multiErr, ok := errors.Cause(err).(errutil.NonNilMultiRootError); ok { for _, err := range multiErr { if _, ok := errors.Cause(err).(HaltError); ok { return true @@ -974,10 +978,14 @@ func (e RetryError) Error() string { return e.err.Error() } +func (e RetryError) Unwrap() error { + return errors.Cause(e.err) +} + // IsRetryError returns true if the base error is a RetryError. // If a multierror is passed, all errors must be retriable. 
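The new Unwrap methods let the standard errors helpers see through the halt/retry wrappers. A minimal illustration of the pattern (editor's sketch; the type is renamed and is not the Thanos API):

	package main

	import (
		"errors"
		"fmt"
		"io"
	)

	// haltError mirrors the shape of the wrapper after this patch: Unwrap
	// exposes the root cause to errors.Is and errors.As.
	type haltError struct{ err error }

	func (e haltError) Error() string { return e.err.Error() }
	func (e haltError) Unwrap() error { return e.err }

	func main() {
		err := haltError{err: io.ErrUnexpectedEOF}
		fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true
	}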
func IsRetryError(err error) bool { - if multiErr, ok := errors.Cause(err).(errutil.NonNilMultiError); ok { + if multiErr, ok := errors.Cause(err).(errutil.NonNilMultiRootError); ok { for _, err := range multiErr { if _, ok := errors.Cause(err).(RetryError); !ok { return false diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go index b648aca558..dfbae08289 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/component/component.go +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -111,6 +111,7 @@ var ( Bucket = source{component: component{name: "bucket"}} Cleanup = source{component: component{name: "cleanup"}} Mark = source{component: component{name: "mark"}} + Upload = source{component: component{name: "upload"}} Rewrite = source{component: component{name: "rewrite"}} Retention = source{component: component{name: "retention"}} Compact = source{component: component{name: "compact"}} diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go index b7ab4988f4..a99b714e27 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go +++ b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go @@ -7,6 +7,8 @@ import ( "bytes" "fmt" "sync" + + "github.com/pkg/errors" ) // The MultiError type implements the error interface, and contains the @@ -62,6 +64,33 @@ type NonNilMultiError MultiError // Returns a concatenated string of the contained errors. func (es NonNilMultiError) Error() string { + return multiErrorString(es) +} + +func (es NonNilMultiError) Cause() error { + return es.getCause() +} + +func (es NonNilMultiError) getCause() NonNilMultiRootError { + var causes []error + for _, err := range es { + if multiErr, ok := errors.Cause(err).(NonNilMultiError); ok { + causes = append(causes, multiErr.getCause()...) + } else { + causes = append(causes, errors.Cause(err)) + } + } + return causes +} + +type NonNilMultiRootError MultiError + +// Returns a concatenated string of the contained errors. +func (es NonNilMultiRootError) Error() string { + return multiErrorString(es) +} + +func multiErrorString(es []error) string { var buf bytes.Buffer if len(es) > 1 { diff --git a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/config.go b/vendor/github.com/thanos-io/thanos/pkg/httpconfig/config.go deleted file mode 100644 index 3280e33378..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/config.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package httpconfig - -import ( - "fmt" - "net/url" - "strings" - - "gopkg.in/yaml.v2" - - "github.com/pkg/errors" -) - -// Config is a structure that allows pointing to various HTTP endpoint, e.g ruler connecting to queriers. -type Config struct { - HTTPClientConfig ClientConfig `yaml:"http_config"` - EndpointsConfig EndpointsConfig `yaml:",inline"` -} - -func DefaultConfig() Config { - return Config{ - EndpointsConfig: EndpointsConfig{ - Scheme: "http", - StaticAddresses: []string{}, - FileSDConfigs: []FileSDConfig{}, - }, - } -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultConfig() - type plain Config - return unmarshal((*plain)(c)) -} - -// LoadConfigs loads a list of Config from YAML data. 
-func LoadConfigs(confYAML []byte) ([]Config, error) { - var queryCfg []Config - if err := yaml.UnmarshalStrict(confYAML, &queryCfg); err != nil { - return nil, err - } - return queryCfg, nil -} - -// BuildConfig returns a configuration from a static addresses. -func BuildConfig(addrs []string) ([]Config, error) { - configs := make([]Config, 0, len(addrs)) - for i, addr := range addrs { - if addr == "" { - return nil, errors.Errorf("static address cannot be empty at index %d", i) - } - // If addr is missing schema, add http. - if !strings.Contains(addr, "://") { - addr = fmt.Sprintf("http://%s", addr) - } - u, err := url.Parse(addr) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse addr %q", addr) - } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.Errorf("%q is not supported scheme for address", u.Scheme) - } - configs = append(configs, Config{ - EndpointsConfig: EndpointsConfig{ - Scheme: u.Scheme, - StaticAddresses: []string{u.Host}, - PathPrefix: u.Path, - }, - }) - } - return configs, nil -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index 16c292d2f8..5dde62c5ee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/codes" "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" @@ -85,7 +85,7 @@ func NewClient(c HTTPClient, logger log.Logger, userAgent string) *Client { // NewDefaultClient returns Client with tracing tripperware. func NewDefaultClient() *Client { - client, _ := httpconfig.NewHTTPClient(httpconfig.ClientConfig{}, "") + client, _ := clientconfig.NewHTTPClient(clientconfig.HTTPClientConfig{}, "") return NewWithTracingClient( log.NewNopLogger(), client, @@ -161,6 +161,22 @@ func IsWALDirAccessible(dir string) error { return nil } +// IsDirAccessible returns no error if dir can be found. +func IsDirAccessible(dir string) error { + const errMsg = "Dir is not accessible." + + f, err := os.Stat(dir) + if err != nil { + return errors.Wrap(err, errMsg) + } + + if !f.IsDir() { + return errors.New(errMsg) + } + + return nil +} + // ExternalLabels returns sorted external labels from /api/v1/status/config Prometheus endpoint. // Note that configuration can be hot reloadable on Prometheus, so this config might change in runtime. func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labels, error) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 3ebd6f06a4..fd4fb7392c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -413,6 +413,8 @@ type BucketStore struct { blockEstimatedMaxSeriesFunc BlockEstimator blockEstimatedMaxChunkFunc BlockEstimator + + indexHeaderLazyDownloadStrategy indexheader.LazyDownloadIndexHeaderFunc } func (s *BucketStore) validate() error { @@ -531,6 +533,14 @@ func WithDontResort(true bool) BucketStoreOption { } } +// WithIndexHeaderLazyDownloadStrategy specifies what block to lazy download its index header. 
+// Only used when lazy mmap is enabled at the same time. +func WithIndexHeaderLazyDownloadStrategy(strategy indexheader.LazyDownloadIndexHeaderFunc) BucketStoreOption { + return func(s *BucketStore) { + s.indexHeaderLazyDownloadStrategy = strategy + } +} + // NewBucketStore creates a new bucket backed store that implements the store API against // an object store bucket. It is optimized to work against high latency backends. func NewBucketStore( @@ -559,21 +569,22 @@ func NewBucketStore( b := make([]byte, 0, initialBufSize) return &b }}, - chunkPool: pool.NoopBytes{}, - blocks: map[ulid.ULID]*bucketBlock{}, - blockSets: map[uint64]*bucketBlockSet{}, - blockSyncConcurrency: blockSyncConcurrency, - queryGate: gate.NewNoop(), - chunksLimiterFactory: chunksLimiterFactory, - seriesLimiterFactory: seriesLimiterFactory, - bytesLimiterFactory: bytesLimiterFactory, - partitioner: partitioner, - enableCompatibilityLabel: enableCompatibilityLabel, - postingOffsetsInMemSampling: postingOffsetsInMemSampling, - enableSeriesResponseHints: enableSeriesResponseHints, - enableChunkHashCalculation: enableChunkHashCalculation, - seriesBatchSize: SeriesBatchSize, - sortingStrategy: sortingStrategyStore, + chunkPool: pool.NoopBytes{}, + blocks: map[ulid.ULID]*bucketBlock{}, + blockSets: map[uint64]*bucketBlockSet{}, + blockSyncConcurrency: blockSyncConcurrency, + queryGate: gate.NewNoop(), + chunksLimiterFactory: chunksLimiterFactory, + seriesLimiterFactory: seriesLimiterFactory, + bytesLimiterFactory: bytesLimiterFactory, + partitioner: partitioner, + enableCompatibilityLabel: enableCompatibilityLabel, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + enableSeriesResponseHints: enableSeriesResponseHints, + enableChunkHashCalculation: enableChunkHashCalculation, + seriesBatchSize: SeriesBatchSize, + sortingStrategy: sortingStrategyStore, + indexHeaderLazyDownloadStrategy: indexheader.AlwaysEagerDownloadIndexHeader, } for _, option := range options { @@ -582,7 +593,7 @@ func NewBucketStore( // Depend on the options indexReaderPoolMetrics := indexheader.NewReaderPoolMetrics(extprom.WrapRegistererWithPrefix("thanos_bucket_store_", s.reg)) - s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics) + s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics, s.indexHeaderLazyDownloadStrategy) s.metrics = newBucketStoreMetrics(s.reg) // TODO(metalmatze): Might be possible via Option too if err := s.validate(); err != nil { @@ -759,6 +770,7 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er s.dir, meta.ULID, s.postingOffsetsInMemSampling, + meta, ) if err != nil { return errors.Wrap(err, "create index header reader") diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index ea48f7e1a2..197364ca04 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/dedup" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" @@ -578,7 +578,7 @@ 
func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que
 	preq.Header.Set("Content-Type", "application/x-stream-protobuf")
 	preq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")
-	preq.Header.Set("User-Agent", httpconfig.ThanosUserAgent)
+	preq.Header.Set("User-Agent", clientconfig.ThanosUserAgent)
 	presp, err = p.client.Do(preq.WithContext(ctx))
 	if err != nil {
 		return nil, errors.Wrap(err, "send request")
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go
index fb3b395a9a..5619977da9 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/custom.go
@@ -3,7 +3,19 @@
 
 package prompb
 
+import (
+	"github.com/prometheus/prometheus/model/histogram"
+)
+
 func (h Histogram) IsFloatHistogram() bool {
 	_, ok := h.GetCount().(*Histogram_CountFloat)
 	return ok
 }
+
+func FromProtoHistogram(h Histogram) *histogram.FloatHistogram {
+	if h.IsFloatHistogram() {
+		return FloatHistogramProtoToFloatHistogram(h)
+	} else {
+		return HistogramProtoToFloatHistogram(h)
+	}
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go b/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go
index f8b54bcc48..aec0bad86a 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go
@@ -8,9 +8,11 @@ import (
 	"net/http"
 	"path"
 
-	"google.golang.org/grpc/metadata"
-
 	"github.com/pkg/errors"
+	"github.com/prometheus-community/prom-label-proxy/injectproxy"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+	"google.golang.org/grpc/metadata"
 )
 
 type contextKey int
@@ -136,3 +138,97 @@ func GetTenantFromGRPCMetadata(ctx context.Context) (string, bool) {
 	}
 	return md.Get(DefaultTenantHeader)[0], true
 }
+
+func EnforceQueryTenancy(tenantLabel string, tenant string, query string) (string, error) {
+	labelMatcher := &labels.Matcher{
+		Name:  tenantLabel,
+		Type:  labels.MatchEqual,
+		Value: tenant,
+	}
+
+	e := injectproxy.NewEnforcer(false, labelMatcher)
+
+	expr, err := parser.ParseExpr(query)
+	if err != nil {
+		return "", errors.Wrap(err, "error parsing query string, when enforcing tenancy")
+	}
+
+	if err := e.EnforceNode(expr); err != nil {
+		return "", errors.Wrap(err, "error enforcing label")
+	}
+
+	return expr.String(), nil
+}
+
+func getLabelMatchers(formMatchers []string, tenant string, enforceTenancy bool, tenantLabel string) ([][]*labels.Matcher, error) {
+	tenantLabelMatcher := &labels.Matcher{
+		Name:  tenantLabel,
+		Type:  labels.MatchEqual,
+		Value: tenant,
+	}
+
+	matcherSets := make([][]*labels.Matcher, 0, len(formMatchers))
+
+	// If tenancy is enforced, but there are no matchers at all, add the tenant matcher.
+	if len(formMatchers) == 0 && enforceTenancy {
+		var matcher []*labels.Matcher
+		matcher = append(matcher, tenantLabelMatcher)
+		matcherSets = append(matcherSets, matcher)
+		return matcherSets, nil
+	}
+
+	for _, s := range formMatchers {
+		matchers, err := parser.ParseMetricSelector(s)
+		if err != nil {
+			return nil, err
+		}
+
+		if enforceTenancy {
+			e := injectproxy.NewEnforcer(false, tenantLabelMatcher)
+			matchers, err = e.EnforceMatchers(matchers)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		matcherSets = append(matcherSets, matchers)
+	}
+
+	return matcherSets, nil
+}
+
+// This function will:
+// - Get tenant from HTTP header and add it to
context. +// - If tenancy is enforced, add a tenant matcher to the PromQL expression. +func RewritePromQL(ctx context.Context, r *http.Request, tenantHeader string, defaultTenantID string, certTenantField string, enforceTenancy bool, tenantLabel string, queryStr string) (string, string, context.Context, error) { + tenant, err := GetTenantFromHTTP(r, tenantHeader, defaultTenantID, certTenantField) + if err != nil { + return "", "", ctx, err + } + ctx = context.WithValue(ctx, TenantKey, tenant) + + if enforceTenancy { + queryStr, err = EnforceQueryTenancy(tenantLabel, tenant, queryStr) + return queryStr, tenant, ctx, err + } + return queryStr, tenant, ctx, nil +} + +// This function will: +// - Get tenant from HTTP header and add it to context. +// - Parse all label matchers provided. +// - If tenancy is enforced, make sure a tenant matcher is present. +func RewriteLabelMatchers(ctx context.Context, r *http.Request, tenantHeader string, defaultTenantID string, certTenantField string, enforceTenancy bool, tenantLabel string, formMatchers []string) ([][]*labels.Matcher, context.Context, error) { + tenant, err := GetTenantFromHTTP(r, tenantHeader, defaultTenantID, certTenantField) + if err != nil { + return nil, ctx, err + } + ctx = context.WithValue(ctx, TenantKey, tenant) + + matcherSets, err := getLabelMatchers(formMatchers, tenant, enforceTenancy, tenantLabel) + if err != nil { + return nil, ctx, err + } + + return matcherSets, ctx, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 0000000000..844b50299f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant.
+func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). +type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). 
+type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index b5e22c498a..2ce119731b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,7 +24,7 @@ import ( var ( defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") ) type decodeBinaryError struct { @@ -1540,12 +1540,12 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr return err } - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSONValue(t, src) } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. @@ -1588,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonr val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSON(src) } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. 
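The two decoder rewrites above swap `reflect.Value.MethodByName(...).Call(...)` for a plain interface type assertion: once the preceding checks establish that `val` (or its address) implements the target interface, asserting to it dispatches directly and avoids allocating a `[]reflect.Value` on every call. A minimal before/after sketch, using a hypothetical `Unmarshaler` interface and `doc` type rather than the driver's own:

```go
package main

import (
	"fmt"
	"reflect"
)

// Unmarshaler stands in for an interface like bsoncodec.Unmarshaler.
type Unmarshaler interface {
	UnmarshalBSON(data []byte) error
}

type doc struct{ raw []byte }

func (d *doc) UnmarshalBSON(data []byte) error { d.raw = data; return nil }

// oldStyle dispatches through reflection, as the removed code did: every call
// allocates an argument slice and boxes the result.
func oldStyle(val reflect.Value, src []byte) error {
	fn := val.MethodByName("UnmarshalBSON")
	errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0]
	if !errVal.IsNil() {
		return errVal.Interface().(error)
	}
	return nil
}

// newStyle dispatches through a type assertion, as the added code does.
func newStyle(val reflect.Value, src []byte) error {
	m, ok := val.Interface().(Unmarshaler)
	if !ok {
		// In the driver this branch is reported as a ValueDecoderError.
		return fmt.Errorf("%s does not implement Unmarshaler", val.Type())
	}
	return m.UnmarshalBSON(src)
}

func main() {
	d := &doc{}
	_ = oldStyle(reflect.ValueOf(d), []byte{0x01})
	_ = newStyle(reflect.ValueOf(d), []byte{0x02})
	fmt.Println(d.raw) // [2]
}
```

The assertion also keeps the failure mode typed: a value that turns out not to implement the interface yields an error value instead of a panic inside `reflect.Call`.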
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 7d526c4ef8..4ab14a668c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -564,12 +564,14 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bs return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} } - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) } @@ -593,12 +595,14 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw. return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} } - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err } - data := returns[0].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) } @@ -622,23 +626,31 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} } - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() + } + v, err := m.ProxyBSON() + if err != nil { + return err + } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() } + encoder, err := ec.LookupEncoder(vv.Type()) if err != nil { return err } - return encoder.EncodeValue(ec, vw, data.Elem()) + return encoder.EncodeValue(ec, vw, vv) } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index a1bf9c3e2b..e5923230b0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -8,7 +8,6 @@ package bsoncodec import ( "reflect" - "sync" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -22,9 +21,8 @@ var _ ValueDecoder = &PointerCodec{} // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // PointerCodec registered. 
type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex + ecache typeEncoderCache + dcache typeDecoderCache } // NewPointerCodec returns a PointerCodec that has been initialized. @@ -32,10 +30,7 @@ type PointerCodec struct { // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // PointerCodec registered. func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } + return &PointerCodec{} } // EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil @@ -52,24 +47,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val return vw.WriteNull() } - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} } - return enc.EncodeValue(ec, vw, val.Elem()) + return v.EncodeValue(ec, vw, val.Elem()) } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) if err != nil { return err } - return enc.EncodeValue(ec, vw, val.Elem()) } @@ -80,36 +70,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} } + typ := val.Type() if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadNull() } if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadUndefined() } if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) + val.Set(reflect.New(typ.Elem())) } - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} } - return dec.DecodeValue(dc, vr, val.Elem()) + return v.DecodeValue(dc, vr, val.Elem()) } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) if err != nil { return err } - return dec.DecodeValue(dc, vr, val.Elem()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 930de28490..196c491bbb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -216,72 +216,42 @@ func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Typ // // Deprecated: Use NewRegistry instead. 
func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder, len(rb.registry.typeEncoders)) - for t, enc := range rb.registry.typeEncoders { - registry.typeEncoders[t] = enc - } - - registry.typeDecoders = make(map[reflect.Type]ValueDecoder, len(rb.registry.typeDecoders)) - for t, dec := range rb.registry.typeDecoders { - registry.typeDecoders[t] = dec - } - - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.registry.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.registry.interfaceEncoders) - - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.registry.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.registry.interfaceDecoders) - - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.registry.kindEncoders { - registry.kindEncoders[kind] = enc - } - - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.registry.kindDecoders { - registry.kindDecoders[kind] = dec + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), } - - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.registry.typeMap { - registry.typeMap[bt] = rt - } - - return registry + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r } // A Registry is used to store and retrieve codecs for types and interfaces. This type is the main // typed passed around and Encoders and Decoders are constructed from it. type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - interfaceEncoders []interfaceValueEncoder interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type } // NewRegistry creates a new empty Registry. func NewRegistry() *Registry { return &Registry{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), } } @@ -296,7 +266,7 @@ func NewRegistry() *Registry { // // RegisterTypeEncoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { - r.typeEncoders[valueType] = enc + r.typeEncoders.Store(valueType, enc) } // RegisterTypeDecoder registers the provided ValueDecoder for the provided type. 
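The rebuilt `Registry` drops its mutex-guarded maps in favor of the `sync.Map`-backed caches introduced in codec_cache.go: lookups become lock-free, and `LoadOrStore` makes whichever codec lands first canonical, so racing goroutines converge on one instance per type. A self-contained sketch of that idiom; `typeCache`, `ValueEncoder`, and `intEncoder` here are illustrative stand-ins, not driver API:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// ValueEncoder is a placeholder for the driver's encoder interface.
type ValueEncoder interface{ Name() string }

type intEncoder struct{}

func (intEncoder) Name() string { return "intEncoder" }

// typeCache is a read-mostly cache: Load never takes a lock, and
// LoadOrStore lets the first writer win.
type typeCache struct {
	m sync.Map // map[reflect.Type]ValueEncoder
}

func (c *typeCache) Load(rt reflect.Type) (ValueEncoder, bool) {
	if v, ok := c.m.Load(rt); ok {
		return v.(ValueEncoder), true
	}
	return nil, false
}

// LoadOrStore returns the encoder already cached for rt if there is one,
// so concurrent callers all observe the same instance.
func (c *typeCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder {
	v, _ := c.m.LoadOrStore(rt, enc)
	return v.(ValueEncoder)
}

func main() {
	var c typeCache
	rt := reflect.TypeOf(0)
	enc := c.LoadOrStore(rt, intEncoder{})
	again, ok := c.Load(rt)
	fmt.Println(enc.Name(), again.Name(), ok) // intEncoder intEncoder true
}
```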
@@ -310,7 +280,7 @@ func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) // // RegisterTypeDecoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { - r.typeDecoders[valueType] = dec + r.typeDecoders.Store(valueType, dec) } // RegisterKindEncoder registers the provided ValueEncoder for the provided kind. @@ -326,7 +296,7 @@ func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) // // RegisterKindEncoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { - r.kindEncoders[kind] = enc + r.kindEncoders.Store(kind, enc) } // RegisterKindDecoder registers the provided ValueDecoder for the provided kind. @@ -342,7 +312,7 @@ func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { // // RegisterKindDecoder should not be called concurrently with any other Registry method. func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { - r.kindDecoders[kind] = dec + r.kindDecoders.Store(kind, dec) } // RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will @@ -401,7 +371,7 @@ func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder // // reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { - r.typeMap[bt] = rt + r.typeMap.Store(bt, rt) } // LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup @@ -418,9 +388,10 @@ func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { // If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for // concurrent use by multiple goroutines after all codecs and encoders are registered. 
func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - r.mu.RLock() + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } enc, found := r.lookupTypeEncoder(valueType) - r.mu.RUnlock() if found { if enc == nil { return nil, ErrNoEncoder{Type: valueType} @@ -430,36 +401,21 @@ func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { enc, found = r.lookupInterfaceEncoder(valueType, true) if found { - r.mu.Lock() - r.typeEncoders[valueType] = enc - r.mu.Unlock() - return enc, nil + return r.typeEncoders.LoadOrStore(valueType, enc), nil } - if valueType == nil { - r.mu.Lock() - r.typeEncoders[valueType] = nil - r.mu.Unlock() - return nil, ErrNoEncoder{Type: valueType} - } - - enc, found = r.kindEncoders[valueType.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[valueType] = nil - r.mu.Unlock() - return nil, ErrNoEncoder{Type: valueType} + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil } + return nil, ErrNoEncoder{Type: valueType} +} - r.mu.Lock() - r.typeEncoders[valueType] = enc - r.mu.Unlock() - return enc, nil +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) } -func (r *Registry) lookupTypeEncoder(valueType reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[valueType] - return enc, found +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) } func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { @@ -475,7 +431,7 @@ func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool // ahead in interfaceEncoders defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[valueType.Kind()] + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -500,10 +456,7 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: valueType} - r.mu.RLock() dec, found := r.lookupTypeDecoder(valueType) - r.mu.RUnlock() if found { if dec == nil { return nil, ErrNoDecoder{Type: valueType} @@ -513,29 +466,21 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { dec, found = r.lookupInterfaceDecoder(valueType, true) if found { - r.mu.Lock() - r.typeDecoders[valueType] = dec - r.mu.Unlock() - return dec, nil + return r.storeTypeDecoder(valueType, dec), nil } - dec, found = r.kindDecoders[valueType.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[valueType] = nil - r.mu.Unlock() - return nil, decodererr + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil } - - r.mu.Lock() - r.typeDecoders[valueType] = dec - r.mu.Unlock() - return dec, nil + return nil, ErrNoDecoder{Type: valueType} } func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[valueType] - return dec, found + return r.typeDecoders.Load(valueType) +} + +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) } func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { @@ -548,7 +493,7 @@ func (r *Registry) lookupInterfaceDecoder(valueType 
reflect.Type, allowAddr bool // ahead in interfaceDecoders defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[valueType.Kind()] + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -561,11 +506,11 @@ func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool // // LookupTypeMapEntry should not be called concurrently with any other Registry method. func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { return nil, ErrNoTypeMapEntry{Type: bt} } - return t, nil + return v.(reflect.Type), nil } type interfaceValueEncoder struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 20c3e7549c..a43daf005f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -62,7 +62,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re } // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().ConvertibleTo(tD) { + if val.Type() == tD || val.Type().ConvertibleTo(tD) { d := val.Convert(tD).Interface().(primitive.D) dw, err := vw.WriteDocument() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 1dfdd98865..4cde0a4d6b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -63,8 +63,7 @@ type Zeroer interface { // Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the // StructCodec registered. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex + cache sync.Map // map[reflect.Type]*structDescription parser StructTagParser // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the @@ -115,7 +114,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) structOpt := bsonoptions.MergeStructCodecOptions(opts...) codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), parser: p, } @@ -192,15 +190,14 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val encoder := desc.encoder var zero bool - rvInterface := rv.Interface() if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rvInterface) + zero = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { // isZero will not treat an interface rv as an interface, so we need to check for the // zero interface separately. 
zero = rv.IsNil() } else { - zero = isZero(rvInterface, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } if desc.omitEmpty && zero { continue @@ -394,56 +391,32 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return nil } -func isZero(i interface{}, omitZeroStruct bool) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true +func isZero(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: + if kind == reflect.Struct { if !omitZeroStruct { return false } - - // TODO(GODRIVER-2820): Update the logic to be able to handle private struct fields. - // TODO Use condition "reflect.Zero(v.Type()).Equal(v)" instead. - vt := v.Type() if vt == tTime { return v.Interface().(time.Time).IsZero() } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { continue // Private field } - fld := v.Field(i) - if !isZero(fld.Interface(), omitZeroStruct) { + if !isZero(v.Field(i), omitZeroStruct) { return false } } return true } - - return false + return !v.IsValid() || v.IsZero() } type structDescription struct { @@ -502,13 +475,27 @@ func (sc *StructCodec) describeStruct( ) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. - sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil + } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { numFields := t.NumField() sd := &structDescription{ fm: make(map[string]fieldDescription, numFields), @@ -639,10 +626,6 @@ func (sc *StructCodec) describeStruct( sort.Sort(byIndex(sd.fl)) - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - return sd, nil } @@ -700,21 +683,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { // DeepZero returns recursive zero object func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) } } } } - - return + return result } // recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index 07f4b70e6d..6ade17b7d3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() var tBinary = reflect.TypeOf(primitive.Binary{}) var tUndefined = reflect.TypeOf(primitive.Undefined{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 33d59bd258..4d279b7fee 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -124,7 +124,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error } func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. length, rem, ok := bsoncore.ReadLength(src) if !ok { return fmt.Errorf("couldn't read length from src, not enough bytes. 
length=%d", len(src)) @@ -193,7 +193,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -213,7 +213,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -258,7 +258,7 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) start := len(dst) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 9bf24fae0b..a242bb57cf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -739,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return nil, ErrEOA } - _, err = vr.readCString() - if err != nil { + if err := vr.skipCString(); err != nil { return nil, err } @@ -794,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) { return vr.d[vr.offset-1], nil } +func (vr *valueReader) skipCString() error { + idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) + if idx < 0 { + return io.EOF + } + vr.offset += int64(idx) + 1 + return nil +} + func (vr *valueReader) readCString() (string, error) { idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) if idx < 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a6dd8d34f5..311518a80d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -28,6 +28,13 @@ var vwPool = sync.Pool{ }, } +func putValueWriter(vw *valueWriter) { + if vw != nil { + vw.w = nil // don't leak the writer + vwPool.Put(vw) + } +} + // BSONValueWriterPool is a pool for BSON ValueWriters. // // Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
@@ -149,32 +156,21 @@ type valueWriter struct { } func (vw *valueWriter) advanceFrame() { - if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack - length := len(vw.stack) - if length+1 >= cap(vw.stack) { - // double it - buf := make([]vwState, 2*cap(vw.stack)+1) - copy(buf, vw.stack) - vw.stack = buf - } - vw.stack = vw.stack[:length+1] - } vw.frame++ + if vw.frame >= int64(len(vw.stack)) { + vw.stack = append(vw.stack, vwState{}) + } } func (vw *valueWriter) push(m mode) { vw.advanceFrame() // Clean the stack - vw.stack[vw.frame].mode = m - vw.stack[vw.frame].key = "" - vw.stack[vw.frame].arrkey = 0 - vw.stack[vw.frame].start = 0 + vw.stack[vw.frame] = vwState{mode: m} - vw.stack[vw.frame].mode = m switch m { case mDocument, mArray, mCodeWithScope: - vw.reserveLength() + vw.reserveLength() // WARN: this is not needed } } @@ -213,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter { return vw } +// TODO: only used in tests func newValueWriterFromSlice(buf []byte) *valueWriter { vw := new(valueWriter) stack := make([]vwState, 1, 5) @@ -249,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod } func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { - switch vw.stack[vw.frame].mode { + frame := &vw.stack[vw.frame] + switch frame.mode { case mElement: - key := vw.stack[vw.frame].key + key := frame.key if !isValidCString(key) { return errors.New("BSON element key cannot contain null bytes") } - - vw.buf = bsoncore.AppendHeader(vw.buf, t, key) + vw.appendHeader(t, key) case mValue: - // TODO: Do this with a cache of the first 1000 or so array keys. - vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) + vw.appendIntHeader(t, frame.arrkey) default: modes := []mode{mElement, mValue} if addmodes != nil { @@ -601,9 +597,11 @@ func (vw *valueWriter) writeLength() error { if length > maxSize { return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))} } - length = length - int(vw.stack[vw.frame].start) - start := vw.stack[vw.frame].start + frame := &vw.stack[vw.frame] + length = length - int(frame.start) + start := frame.start + _ = vw.buf[start+3] // BCE vw.buf[start+0] = byte(length) vw.buf[start+1] = byte(length >> 8) vw.buf[start+2] = byte(length >> 16) @@ -612,5 +610,31 @@ func (vw *valueWriter) writeLength() error { } func isValidCString(cs string) bool { - return !strings.ContainsRune(cs, '\x00') + // Disallow the zero byte in a cstring because the zero byte is used as the + // terminating character. + // + // It's safe to check bytes instead of runes because all multibyte UTF-8 + // code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e. + // 0) will never be part of a multibyte UTF-8 code point. This logic is the + // same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be + // inlined. + // + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127 + return strings.IndexByte(cs, 0) == -1 +} + +// appendHeader is the same as bsoncore.AppendHeader but does not check if the +// key is a valid C string since the caller has already checked for that. +// +// The caller of this function must check if key is a valid C string. +func (vw *valueWriter) appendHeader(t bsontype.Type, key string) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = append(vw.buf, key...) 
+ vw.buf = append(vw.buf, 0x00) +} + +func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = strconv.AppendInt(vw.buf, int64(key), 10) + vw.buf = append(vw.buf, 0x00) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index f38c263a4c..255d9909e3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -47,6 +47,7 @@ const ( BinaryMD5 byte = 0x05 BinaryEncrypted byte = 0x06 BinaryColumn byte = 0x07 + BinarySensitive byte = 0x08 BinaryUserDefined byte = 0x80 ) @@ -102,3 +103,14 @@ func (bt Type) String() string { return "invalid" } } + +// IsValid will return true if the Type is valid. +func (bt Type) IsValid() bool { + switch bt { + case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex, + DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey: + return true + default: + return false + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index f2c48d049e..17ce6697e0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -9,6 +9,7 @@ package bson import ( "bytes" "encoding/json" + "sync" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" @@ -141,6 +142,13 @@ func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{ return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } +// Pool of buffers for marshalling BSON. +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. If dst is not large enough to hold the bytes, it will be grown. If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. @@ -162,8 +170,26 @@ func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{ // // See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { - sw := new(bsonrw.SliceWriter) - *sw = dst + sw := bufPool.Get().(*bytes.Buffer) + defer func() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum + // buffer to place back in the pool. We limit the size to 16MiB because + // that's the maximum wire message size supported by any current MongoDB + // server. + // + // Comment based on + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147 + // + // Recycle byte slices that are smaller than 16MiB and at least half + // occupied. + if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() { + bufPool.Put(sw) + } + }() + + sw.Reset() vw := bvwPool.Get(sw) defer bvwPool.Put(vw) @@ -184,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf return nil, err } - return *sw, nil + return append(dst, sw.Bytes()...), nil } // MarshalValue returns the BSON encoding of val. 
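`MarshalAppendWithContext` above now rents a `bytes.Buffer` from a pool and appends its contents to `dst`, and the put path only recycles buffers that stayed under 16MiB and ended at least half occupied, keeping pool entries roughly uniform in cost. A standalone sketch of that recycling policy; `marshalWith` and its callback are illustrative, only the size test mirrors the diff:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

const maxPooled = 16 * 1024 * 1024 // 16MiB, the cap named in the diff's comment

func marshalWith(fill func(*bytes.Buffer)) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	defer func() {
		// Recycle only buffers that are smaller than the cap and at least
		// half occupied, so one huge document doesn't pin memory and
		// barely-used giants don't skew the pool.
		if buf.Cap() < maxPooled && buf.Cap()/2 < buf.Len() {
			bufPool.Put(buf)
		}
	}()
	buf.Reset()
	fill(buf)
	// Copy out: the buffer may return to the pool, so callers must not
	// retain its internal slice.
	return append([]byte(nil), buf.Bytes()...)
}

func main() {
	out := marshalWith(func(b *bytes.Buffer) { b.WriteString("doc") })
	fmt.Printf("%s\n", out) // doc
}
```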
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index 6b9602589c..ff32a87a79 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -8,6 +8,7 @@ package bson import ( "errors" + "fmt" "reflect" "go.mongodb.org/mongo-driver/bson/bsoncodec" @@ -45,15 +46,26 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) // RawValueEncodeValue is the ValueEncoderFunc for RawValue. // -// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders -// registered. +// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or +// nil, then this method will return an error. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive +// encoders and decoders registered. func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRawValue { - return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val} + return bsoncodec.ValueEncoderError{ + Name: "RawValueEncodeValue", + Types: []reflect.Type{tRawValue}, + Received: val, + } } rawvalue := val.Interface().(RawValue) + if !rawvalue.Type.IsValid() { + return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type)) + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index fe990a1771..130da61ba0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -60,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) { // elements. If the document is not valid, the elements up to the invalid point will be returned // along with an error. func (r Raw) Elements() ([]RawElement, error) { - elems, err := bsoncore.Document(r).Elements() + doc := bsoncore.Document(r) + if len(doc) == 0 { + return nil, nil + } + elems, err := doc.Elements() + if err != nil { + return nil, err + } relems := make([]RawElement, 0, len(elems)) for _, elem := range elems { relems = append(relems, RawElement(elem)) } - return relems, err + return relems, nil } // Values returns this document as a slice of values. The returned slice will contain valid values. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 6627294c4d..4d1bfb3160 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -37,6 +37,12 @@ type RawValue struct { r *bsoncodec.Registry } +// IsZero reports whether the RawValue is zero, i.e. no data is present on +// the RawValue. It returns true if Type is 0 and Value is empty or nil. +func (rv RawValue) IsZero() bool { + return rv.Type == 0x00 && len(rv.Value) == 0 +} + // Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an // error is returned. This method will use the registry used to create the RawValue, if the RawValue // was created from partial BSON processing, or it will use the default registry. 
Users wishing to diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index e201ac37eb..ef39812467 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -45,5 +45,6 @@ const ( TypeBinaryMD5 = bsontype.BinaryMD5 TypeBinaryEncrypted = bsontype.BinaryEncrypted TypeBinaryColumn = bsontype.BinaryColumn + TypeBinarySensitive = bsontype.BinarySensitive TypeBinaryUserDefined = bsontype.BinaryUserDefined ) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 94d479428f..88133293ea 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -235,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value { return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)} } -// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided +// BuildDocumentElement will append a BSON embedded document element using key and the provided // elements and return the extended buffer. func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte { return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...) @@ -825,6 +825,9 @@ func readLengthBytes(src []byte) ([]byte, []byte, bool) { if !ok { return nil, src, false } + if l < 4 { + return nil, src, false + } if len(src) < int(l) { return nil, src, false } diff --git a/vendor/go.opentelemetry.io/collector/featuregate/README.md b/vendor/go.opentelemetry.io/collector/featuregate/README.md index fe7c4044b8..d3e3c802d6 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/README.md +++ b/vendor/go.opentelemetry.io/collector/featuregate/README.md @@ -67,7 +67,11 @@ modeled after the [system used by Kubernetes](https://kubernetes.io/docs/referen 4. A `stable` feature gate will be removed in the version specified by its `ToVersion` value. Features that prove unworkable in the `alpha` stage may be discontinued -without proceeding to the `beta` stage. Features that make it to the `beta` -stage will not be dropped and will eventually reach general availability -where the `Gate` that allowed them to be disabled during the `beta` stage -will be removed. +without proceeding to the `beta` stage. Instead, they will proceed to the +`deprecated` stage, in which the feature is permanently disabled. A feature gate will +be removed once it has been `deprecated` for at least 2 releases of the collector. + +Features that make it to the `beta` stage are intended to reach general availability but may still be discontinued. +If, after wider use, it is determined that the gate should be discontinued, it will be reverted to the `alpha` stage +for 2 releases and then proceed to the `deprecated` stage. If instead it is ready for general availability, it will +proceed to the `stable` stage. diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go index 64d6ec0c5c..9501296812 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/flag.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package featuregate // import "go.opentelemetry.io/collector/featuregate" @@ -21,9 +10,19 @@ import ( "go.uber.org/multierr" ) -// NewFlag returns a flag.Value that directly applies feature gate statuses to a Registry. -func NewFlag(reg *Registry) flag.Value { - return &flagValue{reg: reg} +const ( + featureGatesFlag = "feature-gates" + featureGatesFlagDescription = "Comma-delimited list of feature gate identifiers. Prefix with '-' to disable the feature. '+' or no prefix will enable the feature." +) + +// RegisterFlagsOption is an option for RegisterFlags. +type RegisterFlagsOption interface { + private() +} + +// RegisterFlags that directly applies feature gate statuses to a Registry. +func (r *Registry) RegisterFlags(flagSet *flag.FlagSet, _ ...RegisterFlagsOption) { + flagSet.Var(&flagValue{reg: r}, featureGatesFlag, featureGatesFlagDescription) } // flagValue implements the flag.Value interface and directly applies feature gate statuses to a Registry. diff --git a/vendor/go.opentelemetry.io/collector/featuregate/gate.go b/vendor/go.opentelemetry.io/collector/featuregate/gate.go index 2ba4ec7899..a250ceb9a8 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/gate.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/gate.go @@ -1,20 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package featuregate // import "go.opentelemetry.io/collector/featuregate" -import "sync/atomic" +import ( + "fmt" + "sync/atomic" + + "github.com/hashicorp/go-version" +) // Gate is an immutable object that is owned by the Registry and represents an individual feature that // may be enabled or disabled based on the lifecycle state of the feature and CLI flags specified by the user. @@ -22,8 +16,8 @@ type Gate struct { id string description string referenceURL string - fromVersion string - toVersion string + fromVersion *version.Version + toVersion *version.Version stage Stage enabled *atomic.Bool } @@ -55,10 +49,10 @@ func (g *Gate) ReferenceURL() string { // FromVersion returns the version information when the Gate's was added. func (g *Gate) FromVersion() string { - return g.fromVersion + return fmt.Sprintf("v%s", g.fromVersion) } // ToVersion returns the version information when Gate's in StageStable. 
func (g *Gate) ToVersion() string { - return g.toVersion + return fmt.Sprintf("v%s", g.toVersion) } diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go index 0474f0c205..b43a33e446 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/registry.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go @@ -1,27 +1,26 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package featuregate // import "go.opentelemetry.io/collector/featuregate" import ( "fmt" + "net/url" + "regexp" "sort" "sync" "sync/atomic" + + "github.com/hashicorp/go-version" ) -var globalRegistry = NewRegistry() +var ( + globalRegistry = NewRegistry() + + // idRegexp is used to validate the ID of a Gate. + // IDs' characters must be alphanumeric or dots. + idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`) +) // GlobalRegistry returns the global Registry. func GlobalRegistry() *Registry { @@ -39,43 +38,66 @@ func NewRegistry() *Registry { // RegisterOption allows to configure additional information about a Gate during registration. type RegisterOption interface { - apply(g *Gate) + apply(g *Gate) error } -type registerOptionFunc func(g *Gate) +type registerOptionFunc func(g *Gate) error -func (ro registerOptionFunc) apply(g *Gate) { - ro(g) +func (ro registerOptionFunc) apply(g *Gate) error { + return ro(g) } // WithRegisterDescription adds description for the Gate. func WithRegisterDescription(description string) RegisterOption { - return registerOptionFunc(func(g *Gate) { + return registerOptionFunc(func(g *Gate) error { g.description = description + return nil }) } // WithRegisterReferenceURL adds a URL that has all the contextual information about the Gate. -func WithRegisterReferenceURL(url string) RegisterOption { - return registerOptionFunc(func(g *Gate) { - g.referenceURL = url +// referenceURL must be a valid URL as defined by `net/url.Parse`. +func WithRegisterReferenceURL(referenceURL string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + if _, err := url.Parse(referenceURL); err != nil { + return fmt.Errorf("WithRegisterReferenceURL: invalid reference URL %q: %w", referenceURL, err) + } + + g.referenceURL = referenceURL + return nil }) } // WithRegisterFromVersion is used to set the Gate "FromVersion". // The "FromVersion" contains the Collector release when a feature is introduced. +// fromVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. 
func WithRegisterFromVersion(fromVersion string) RegisterOption { - return registerOptionFunc(func(g *Gate) { - g.fromVersion = fromVersion + return registerOptionFunc(func(g *Gate) error { + from, err := version.NewVersion(fromVersion) + if err != nil { + return fmt.Errorf("WithRegisterFromVersion: invalid version %q: %w", fromVersion, err) + } + + g.fromVersion = from + return nil }) } // WithRegisterToVersion is used to set the Gate "ToVersion". // The "ToVersion", if not empty, contains the last Collector release in which you can still use a feature gate. // If the feature stage is either "Deprecated" or "Stable", the "ToVersion" is the Collector release when the feature is removed. +// toVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. func WithRegisterToVersion(toVersion string) RegisterOption { - return registerOptionFunc(func(g *Gate) { - g.toVersion = toVersion + return registerOptionFunc(func(g *Gate) error { + to, err := version.NewVersion(toVersion) + if err != nil { + return fmt.Errorf("WithRegisterToVersion: invalid version %q: %w", toVersion, err) + } + + g.toVersion = to + return nil }) } @@ -88,17 +110,36 @@ func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) return g } +func validateID(id string) error { + if id == "" { + return fmt.Errorf("empty ID") + } + + if !idRegexp.MatchString(id) { + return fmt.Errorf("invalid character(s) in ID") + } + return nil +} + // Register a Gate and return it. The returned Gate can be used to check if is enabled or not. +// id must be an ASCII alphanumeric nonempty string. Dots are allowed for namespacing. func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Gate, error) { + if err := validateID(id); err != nil { + return nil, fmt.Errorf("invalid ID %q: %w", id, err) + } + g := &Gate{ id: id, stage: stage, } for _, opt := range opts { - opt.apply(g) + err := opt.apply(g) + if err != nil { + return nil, fmt.Errorf("failed to apply option: %w", err) + } } switch g.stage { - case StageAlpha: + case StageAlpha, StageDeprecated: g.enabled = &atomic.Bool{} case StageBeta, StageStable: enabled := &atomic.Bool{} @@ -107,9 +148,14 @@ func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Ga default: return nil, fmt.Errorf("unknown stage value %q for gate %q", stage, id) } - if g.stage == StageStable && g.toVersion == "" { - return nil, fmt.Errorf("no removal version set for stable gate %q", id) + if (g.stage == StageStable || g.stage == StageDeprecated) && g.toVersion == nil { + return nil, fmt.Errorf("no removal version set for %v gate %q", g.stage.String(), id) + } + + if g.fromVersion != nil && g.toVersion != nil && g.toVersion.LessThan(g.fromVersion) { + return nil, fmt.Errorf("toVersion %q is before fromVersion %q", g.toVersion, g.fromVersion) } + if _, loaded := r.gates.LoadOrStore(id, g); loaded { return nil, fmt.Errorf("attempted to add pre-existing gate %q", id) } @@ -120,16 +166,28 @@ func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Ga func (r *Registry) Set(id string, enabled bool) error { v, ok := r.gates.Load(id) if !ok { - return fmt.Errorf("no such feature gate %q", id) + validGates := []string{} + r.VisitAll(func(g *Gate) { + validGates = append(validGates, g.ID()) + }) + return fmt.Errorf("no such feature gate %q. 
valid gates: %v", id, validGates) } g := v.(*Gate) - if g.stage == StageStable { + + switch g.stage { + case StageStable: if !enabled { return fmt.Errorf("feature gate %q is stable, can not be disabled", id) } fmt.Printf("Feature gate %q is stable and already enabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion) + case StageDeprecated: + if enabled { + return fmt.Errorf("feature gate %q is deprecated, can not be enabled", id) + } + fmt.Printf("Feature gate %q is deprecated and already disabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion) + default: + g.enabled.Store(enabled) } - g.enabled.Store(enabled) return nil } diff --git a/vendor/go.opentelemetry.io/collector/featuregate/stage.go b/vendor/go.opentelemetry.io/collector/featuregate/stage.go index 2034e1c80d..f2be1b248d 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/stage.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/stage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package featuregate // import "go.opentelemetry.io/collector/featuregate" diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go index 5352ff44d5..7434f467ae 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go @@ -137,7 +137,6 @@ func (es Slice) RemoveIf(f func(Value) bool) { (*es.getOrig())[newLen] = (*es.getOrig())[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.getOrig() = (*es.getOrig())[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go index 733341c36b..15d70a6ede 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go @@ -117,7 +117,6 @@ func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. 
*es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go index 859a08b28d..3d76368b38 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go @@ -204,6 +204,17 @@ func (ms ExponentialHistogramDataPoint) RemoveMax() { ms.orig.Max_ = nil } +// ZeroThreshold returns the zerothreshold associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) ZeroThreshold() float64 { + return ms.orig.ZeroThreshold +} + +// SetZeroThreshold replaces the zerothreshold associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) { + ms.state.AssertMutable() + ms.orig.ZeroThreshold = v +} + // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) { dest.state.AssertMutable() @@ -229,4 +240,5 @@ func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoin dest.SetMax(ms.Max()) } + dest.SetZeroThreshold(ms.ZeroThreshold()) } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go index 94ec526565..a466a7c185 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go @@ -119,7 +119,6 @@ func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogra (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go index 47b02e17f7..7ee6ef737f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go @@ -119,7 +119,6 @@ func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go index 30d2e0c1f6..13f05a0ecb 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go @@ -119,7 +119,6 @@ func (es MetricSlice) RemoveIf(f func(Metric) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. 
*es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go index 13cd71b727..57cdd11743 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go @@ -119,7 +119,6 @@ func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go index b25fdc721a..55217ea27d 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go @@ -119,7 +119,6 @@ func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go index d2bbe61085..a86eb0b481 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go @@ -119,7 +119,6 @@ func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go index e8aa51de09..f915500963 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go @@ -119,7 +119,6 @@ func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) { (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. *es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go index 21343f8ce5..ed899050ac 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go @@ -119,7 +119,6 @@ func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointV (*es.orig)[newLen] = (*es.orig)[i] newLen++ } - // TODO: Prevent memory leak by erasing truncated values. 
*es.orig = (*es.orig)[:newLen] } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go index 3df234c200..3944bb0270 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go @@ -17,8 +17,10 @@ import ( var _ Marshaler = (*JSONMarshaler)(nil) +// JSONMarshaler marshals pdata.Metrics to JSON bytes using the OTLP/JSON format. type JSONMarshaler struct{} +// MarshalMetrics to the OTLP/JSON format. func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { buf := bytes.Buffer{} pb := internal.MetricsToProto(internal.Metrics(md)) @@ -26,8 +28,10 @@ func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { return buf.Bytes(), err } +// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Metrics. type JSONUnmarshaler struct{} +// UnmarshalMetrics from OTLP/JSON format into pdata.Metrics. func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { iter := jsoniter.ConfigFastest.BorrowIterator(buf) defer jsoniter.ConfigFastest.ReturnIterator(iter) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 303e5505e4..9509014e87 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -34,7 +34,7 @@ const ( RequestCount = "http.server.request_count" // Incoming request count total RequestContentLength = "http.server.request_content_length" // Incoming request bytes total ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total - ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds ) // Filter is a predicate used to determine whether a given http.request should @@ -42,5 +42,5 @@ const ( type Filter func(*http.Request) bool func newTracer(tp trace.TracerProvider) trace.Tracer { - return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index e4fa1b8d9d..a1b5b5e5aa 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -25,9 +25,8 @@ import ( "go.opentelemetry.io/otel/trace" ) -const ( - instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" -) +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // config represents the configuration options available for the http.Handler // and http.Transport types. 
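With `ScopeName` now exported (and the latency histogram's unit corrected to milliseconds), callers can reference the same instrumentation scope the package uses internally. A small sketch, assuming only the exported `ScopeName` and `Version` shown above:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// exampleTracer obtains a tracer under the otelhttp instrumentation scope,
// mirroring what newTracer does inside the package.
func exampleTracer() trace.Tracer {
	return otel.GetTracerProvider().Tracer(
		otelhttp.ScopeName,
		trace.WithInstrumentationVersion(otelhttp.Version()),
	)
}

func main() {
	_ = exampleTracer()
}
```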
@@ -76,7 +75,7 @@ func newConfig(opts ...Option) *config { } c.Meter = c.MeterProvider.Meter( - instrumentationName, + ScopeName, metric.WithInstrumentationVersion(Version()), ) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index b2fbe07841..9a8260059d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -107,13 +107,25 @@ func (h *middleware) createMeasures() { h.counters = make(map[string]metric.Int64Counter) h.valueRecorders = make(map[string]metric.Float64Histogram) - requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + requestBytesCounter, err := h.meter.Int64Counter( + RequestContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), + ) handleErr(err) - responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + responseBytesCounter, err := h.meter.Int64Counter( + ResponseContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), + ) handleErr(err) - serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + serverLatencyMeasure, err := h.meter.Float64Histogram( + ServerLatency, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of HTTP request handling"), + ) handleErr(err) h.counters[RequestContentLength] = requestBytesCounter diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 6eace875cf..bd41c18042 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.45.0" + return "0.46.1" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index f3355c852b..895c7664be 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -14,12 +14,9 @@ go.work.sum gen/ /example/dice/dice -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger /example/namedtracer/namedtracer +/example/otel-collector/otel-collector /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus /example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6e8eeec00f..a62511f382 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -12,8 +12,9 @@ linters: - depguard - errcheck - godot - - gofmt + - gofumpt - goimports + - gosec - gosimple - govet - ineffassign @@ -53,6 +54,20 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive + # It's okay to not run gosec in a test. 
+ - path: _test\.go + linters: + - gosec + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as https://pkg.go.dev/crypto/tls#Config handles the MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec include: # revive exported should have comment or be unexported. - EXC0012 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 3e5c35b5dc..24874f856e 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,85 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`.
(#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) +- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) + +### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`.
(#4679) + +### Fixed + +- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as whitespace. (#4699) +- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) + ## [1.19.0/0.42.0/0.0.7] 2023-09-28 This release contains the first stable release of the OpenTelemetry Go [metric SDK]. @@ -2656,7 +2735,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 @@ -2731,7 +2812,7 @@ It contains api and sdk for trace and meter. [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 -[Go 1.19]: https://go.dev/doc/go1.19 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index a00dbca7b0..850606ae69 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -90,6 +90,10 @@ git push Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`.
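The `+`/`/` parsing fixes listed under 1.20.0 above all come down to swapping `url.QueryUnescape` for `url.PathUnescape`; a short standard-library illustration of the difference:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// QueryUnescape decodes '+' as a space, which corrupted baggage and
	// header values such as "a+b"; PathUnescape leaves '+' intact.
	q, _ := url.QueryUnescape("a+b")
	p, _ := url.PathUnescape("a+b")
	fmt.Println(q) // "a b" (old behavior)
	fmt.Println(p) // "a+b" (fixed behavior)
}
```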
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + ### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 5c311706b0..35fc189961 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + .PHONY: tools tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) @@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt +# Adding a directory will include all benchmarks in that directory if a filter is not specified. +BENCHMARK_TARGETS := sdk/trace +.PHONY: benchmark +benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) +BENCHMARK_FILTER = . +# You can override the filter for a particular directory by adding a rule here. +benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark/%: + @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint @@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink lint-modules: go-mod-tidy .PHONY: lint -lint: misspell lint-modules golangci-lint +lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: | $(PORTO) @@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO) misspell: | $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./...
+ .PHONY: codespell codespell: | $(CODESPELL) @$(DOCKERPY) $(CODESPELL) @@ -289,3 +312,7 @@ COMMIT ?= "HEAD" add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 634326ef83..2c5b0cc28a 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | Project | -|---------|------------|-----------------------| -| Traces | Stable | N/A | -| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | -| Logs | Frozen [2] | N/A | +| Signal | Status | +|---------|------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Design [1] | -[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 - -- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta. -- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK. +- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 9e6b3b7b52..84532cb1da 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - decodedValue, err := url.QueryUnescape(value) + decodedValue, err := url.PathUnescape(value) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) { // when converting the header into a data structure." 
key = strings.TrimSpace(k) var err error - value, err = url.QueryUnescape(strings.TrimSpace(v)) + value, err = url.PathUnescape(strings.TrimSpace(v)) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", err, value) } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go index 50ff21a191..cc3d7d19c7 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go @@ -33,10 +33,11 @@ import ( iBaggage "go.opentelemetry.io/otel/internal/baggage" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" ) var ( - noopTracer = trace.NewNoopTracerProvider().Tracer("") + noopTracer = noop.NewTracerProvider().Tracer("") noopSpan = func() trace.Span { _, s := noopTracer.Start(context.Background(), "") return s @@ -317,8 +318,10 @@ type BridgeTracer struct { propagator propagation.TextMapPropagator } -var _ ot.Tracer = &BridgeTracer{} -var _ ot.TracerContextWithSpanExtension = &BridgeTracer{} +var ( + _ ot.Tracer = &BridgeTracer{} + _ ot.TracerContextWithSpanExtension = &BridgeTracer{} +) // NewBridgeTracer creates a new BridgeTracer. The new tracer forwards // the calls to the OpenTelemetry Noop tracer, so it should be @@ -829,15 +832,13 @@ func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) { return t, nil } -type textMapWriter struct { -} +type textMapWriter struct{} func (t *textMapWriter) Set(key string, value string) { // maybe print a warning log. } -type textMapReader struct { -} +type textMapReader struct{} func (t *textMapReader) ForeachKey(handler func(key, val string) error) error { return nil // maybe print a warning log. diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go index 941e277baf..90bad0bd51 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go @@ -18,11 +18,14 @@ import ( "sync" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // TracerProvider is an OpenTelemetry TracerProvider that wraps an OpenTracing // Tracer. type TracerProvider struct { + embedded.TracerProvider + bridge *BridgeTracer provider trace.TracerProvider diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/wrapper.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/wrapper.go index 3e348c4552..1e065e3bc6 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/wrapper.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/wrapper.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // WrapperTracerProvider is an OpenTelemetry TracerProvider that wraps an @@ -26,6 +27,8 @@ import ( // // Deprecated: Use the TracerProvider from NewTracerProvider(...) instead. type WrapperTracerProvider struct { + embedded.TracerProvider + wTracer *WrapperTracer } @@ -56,12 +59,16 @@ func NewWrappedTracerProvider(bridge *BridgeTracer, tracer trace.Tracer) *Wrappe // aware how to operate in environment where OpenTracing API is also // used. 
type WrapperTracer struct { + embedded.Tracer + bridge *BridgeTracer tracer trace.Tracer } -var _ trace.Tracer = &WrapperTracer{} -var _ migration.DeferredContextSetupTracerExtension = &WrapperTracer{} +var ( + _ trace.Tracer = &WrapperTracer{} + _ migration.DeferredContextSetupTracerExtension = &WrapperTracer{} +) // NewWrapperTracer wraps the passed tracer and also talks to the // passed bridge tracer when setting up the context with the new diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md deleted file mode 100644 index 5029522318..0000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# OpenTelemetry-Go OTLP Span Exporter - -[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) - -[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md) implementation. - -## Installation - -``` -go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace -``` - -## Examples - -- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) -- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) - -## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) - -The `otlptrace` package provides an exporter implementing the OTel span exporter interface. -This exporter is configured using a client satisfying the `otlptrace.Client` interface. -This client handles the transformation of data into wire format and the transmission of that data to the collector. - -## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) - -The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads. - -## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp) - -The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads. - -## Configuration - -### Environment Variables - -The following environment variables can be used (instead of options objects) to -override the default configuration. For more information about how each of -these environment variables is interpreted, see [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md). 
- -| Environment variable | Option | Default value | -| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- | -| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] | -| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | | -| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | | -| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | | -| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` | - -[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`. - -Configuration using options have precedence over the environment variables. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go new file mode 100644 index 0000000000..9e642235ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otlptrace contains abstractions for OTLP span exporters. +See the official OTLP span exporter implementations: + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc], + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. +*/ +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 0dbe15555b..b46a38d60a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -24,9 +24,7 @@ import ( tracesdk "go.opentelemetry.io/otel/sdk/trace" ) -var ( - errAlreadyStarted = errors.New("already started") -) +var errAlreadyStarted = errors.New("already started") // Exporter exports trace data in the OTLP wire format. type Exporter struct { @@ -55,7 +53,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) // Start establishes a connection to the receiving endpoint. 
func (e *Exporter) Start(ctx context.Context) error { - var err = errAlreadyStarted + err := errAlreadyStarted e.startOnce.Do(func() { e.mu.Lock() e.started = true diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 86fb61a0de..b4cc21d7a3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -260,30 +260,38 @@ func (c *client) exportContext(parent context.Context) (context.Context, context // duration to wait for if an explicit throttle time is included in err. func retryable(err error) (bool, time.Duration) { s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { case codes.Canceled, codes.DeadlineExceeded, - codes.ResourceExhausted, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss: - return true, throttleDelay(s) + // Additionally handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that recovery from resource exhaustion is possible. + return throttleDelay(s) } // Not a retry-able error. return false, 0 } -// throttleDelay returns a duration to wait for if an explicit throttle time -// is included in the response status. -func throttleDelay(s *status.Status) time.Duration { +// throttleDelay returns whether the status includes RetryInfo +// and, if so, the duration to wait before retrying. +func throttleDelay(s *status.Status) (bool, time.Duration) { for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { - return t.RetryDelay.AsDuration() + return true, t.RetryDelay.AsDuration() } } - return 0 + return false, 0 } // MarshalLog is the marshaling function used by the logging system to represent this Client. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go new file mode 100644 index 0000000000..1f514ef9ea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otlptracegrpc provides an OTLP span exporter using gRPC. +By default the telemetry is sent to https://localhost:4317. + +Exporter should be created using [New]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a host. +The value may additionally contain a port, a scheme, and a path.
+The value accepts the "http" and "https" schemes. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without the http or https scheme. +A scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +overrides this setting. +OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - +key-value pairs used as gRPC metadata associated with gRPC requests. +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export. +OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) - +the gRPC compressor the exporter uses. +Supported value: "gzip". +OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - +the filepath to the client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+ +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index becb1f0fbb..5530119e4c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.QueryUnescape(n) + name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(v) + value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 19b8434d4d..dddb1f334d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -141,9 +141,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.Traces.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } - if len(cfg.DialOptions) != 0 { - cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...) - } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 78ce9ad8f0..17ffeaf6ef 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -93,13 +93,7 @@ func compressorToCompression(compressor string) otlpconfig.Compression { } // WithCompressor sets the compressor for the gRPC client to use when sending -// requests. It is the responsibility of the caller to ensure that the -// compressor set has been registered with google.golang.org/grpc/encoding. -// This can be done by encoding.RegisterCompressor. Some compressors -// auto-register on import, such as gzip, which can be registered by calling -// `import _ "google.golang.org/grpc/encoding/gzip"`. -// -// This option has no effect if WithGRPCConn is used. +// requests. Supported compressor values: "gzip". 
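A minimal usage sketch for the simplified `WithCompressor` documentation above, assuming the endpoint and credentials are supplied via the `OTEL_EXPORTER_OTLP_*` environment variables just described:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()
	// "gzip" is the only compressor value documented as supported.
	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithCompressor("gzip"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```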
func WithCompressor(compressor string) Option { return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 10ac73ee3b..8ee285b8d5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -16,5 +16,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.19.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index a33eded872..ebb13c2067 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -34,11 +34,13 @@ type afCounter struct { name string opts []metric.Float64ObservableCounterOption - delegate atomic.Value //metric.Float64ObservableCounter + delegate atomic.Value // metric.Float64ObservableCounter } -var _ unwrapper = (*afCounter)(nil) -var _ metric.Float64ObservableCounter = (*afCounter)(nil) +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) func (i *afCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableCounter(i.name, i.opts...) @@ -63,11 +65,13 @@ type afUpDownCounter struct { name string opts []metric.Float64ObservableUpDownCounterOption - delegate atomic.Value //metric.Float64ObservableUpDownCounter + delegate atomic.Value // metric.Float64ObservableUpDownCounter } -var _ unwrapper = (*afUpDownCounter)(nil) -var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) func (i *afUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) @@ -92,11 +96,13 @@ type afGauge struct { name string opts []metric.Float64ObservableGaugeOption - delegate atomic.Value //metric.Float64ObservableGauge + delegate atomic.Value // metric.Float64ObservableGauge } -var _ unwrapper = (*afGauge)(nil) -var _ metric.Float64ObservableGauge = (*afGauge)(nil) +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) func (i *afGauge) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableGauge(i.name, i.opts...) @@ -121,11 +127,13 @@ type aiCounter struct { name string opts []metric.Int64ObservableCounterOption - delegate atomic.Value //metric.Int64ObservableCounter + delegate atomic.Value // metric.Int64ObservableCounter } -var _ unwrapper = (*aiCounter)(nil) -var _ metric.Int64ObservableCounter = (*aiCounter)(nil) +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) func (i *aiCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
@@ -150,11 +158,13 @@ type aiUpDownCounter struct { name string opts []metric.Int64ObservableUpDownCounterOption - delegate atomic.Value //metric.Int64ObservableUpDownCounter + delegate atomic.Value // metric.Int64ObservableUpDownCounter } -var _ unwrapper = (*aiUpDownCounter)(nil) -var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) @@ -179,11 +189,13 @@ type aiGauge struct { name string opts []metric.Int64ObservableGaugeOption - delegate atomic.Value //metric.Int64ObservableGauge + delegate atomic.Value // metric.Int64ObservableGauge } -var _ unwrapper = (*aiGauge)(nil) -var _ metric.Int64ObservableGauge = (*aiGauge)(nil) +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) func (i *aiGauge) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableGauge(i.name, i.opts...) @@ -208,7 +220,7 @@ type sfCounter struct { name string opts []metric.Float64CounterOption - delegate atomic.Value //metric.Float64Counter + delegate atomic.Value // metric.Float64Counter } var _ metric.Float64Counter = (*sfCounter)(nil) @@ -234,7 +246,7 @@ type sfUpDownCounter struct { name string opts []metric.Float64UpDownCounterOption - delegate atomic.Value //metric.Float64UpDownCounter + delegate atomic.Value // metric.Float64UpDownCounter } var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) @@ -260,7 +272,7 @@ type sfHistogram struct { name string opts []metric.Float64HistogramOption - delegate atomic.Value //metric.Float64Histogram + delegate atomic.Value // metric.Float64Histogram } var _ metric.Float64Histogram = (*sfHistogram)(nil) @@ -286,7 +298,7 @@ type siCounter struct { name string opts []metric.Int64CounterOption - delegate atomic.Value //metric.Int64Counter + delegate atomic.Value // metric.Int64Counter } var _ metric.Int64Counter = (*siCounter)(nil) @@ -312,7 +324,7 @@ type siUpDownCounter struct { name string opts []metric.Int64UpDownCounterOption - delegate atomic.Value //metric.Int64UpDownCounter + delegate atomic.Value // metric.Int64UpDownCounter } var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) @@ -338,7 +350,7 @@ type siHistogram struct { name string opts []metric.Int64HistogramOption - delegate atomic.Value //metric.Int64Histogram + delegate atomic.Value // metric.Int64Histogram } var _ metric.Int64Histogram = (*siHistogram)(nil) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 5f008d0982..3f61ec12a3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -39,6 +39,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // tracerProvider is a placeholder for a configured SDK TracerProvider. @@ -46,6 +47,8 @@ import ( // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { + embedded.TracerProvider + mtx sync.Mutex tracers map[il]*tracer delegate trace.TracerProvider @@ -119,6 +122,8 @@ type il struct { // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. 
type tracer struct { + embedded.Tracer + name string opts []trace.TracerOption provider *tracerProvider @@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // SpanContext. It performs no operations other than to return the wrapped // SpanContext. type nonRecordingSpan struct { + embedded.Span + sc trace.SpanContext tracer *tracer } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index ae24e448d9..54716e13b3 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric]. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. For example, -an author who want to default to silently dropping the call can use +an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/metric/noop]: import "go.opentelemetry.io/otel/metric/noop" diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index cdca00058c..be89cd5334 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -39,6 +39,12 @@ type InstrumentOption interface { Float64ObservableGaugeOption } +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + type descOpt string func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { @@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. func WithUnit(u string) InstrumentOption { return unitOpt(u) } +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + // AddOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as an AddOption. type AddOption interface { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index f0b063721d..0a4825ae6a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -147,8 +147,9 @@ type Float64Histogram interface { // Float64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Float64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all @@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
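A sketch of the advisory `WithExplicitBucketBoundaries` option added above; the instrument name, unit, and boundaries are illustrative, and API-only implementations are free to ignore the hint:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.GetMeterProvider().Meter("example")
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("ms"),
		// Advisory default boundaries; SDKs may honor or ignore them.
		metric.WithExplicitBucketBoundaries(5, 10, 25, 50, 100, 250),
	)
	if err != nil {
		log.Fatal(err)
	}
	hist.Record(context.Background(), 42)
}
```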
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
 // Float64HistogramOption applies options to a [Float64HistogramConfig]. See
 // [InstrumentOption] for other options that can be used as a
 // Float64HistogramOption.
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 6f508eb66d..56667d32fc 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -147,8 +147,9 @@ type Int64Histogram interface {
 // Int64HistogramConfig contains options for synchronous counter instruments
 // that record int64 values.
 type Int64HistogramConfig struct {
-	description string
-	unit        string
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
 }
 
 // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
@@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string {
 	return c.unit
 }
 
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
 // Int64HistogramOption applies options to a [Int64HistogramConfig]. See
 // [InstrumentOption] for other options that can be used as an
 // Int64HistogramOption.
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 902692da08..75a8f3435a 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -40,8 +40,10 @@ const (
 // their proprietary information.
 type TraceContext struct{}
 
-var _ TextMapPropagator = TraceContext{}
-var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+var (
+	_              TextMapPropagator = TraceContext{}
+	traceCtxRegExp                   = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+)
 
 // Inject set tracecontext from the Context into the carrier.
 func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index ddff454685..e0a43e1384 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.2.5
+codespell==2.2.6
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
index 324dd4baf2..4279013be8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -21,12 +21,10 @@ import (
 	"strings"
 )
 
-var (
-	// ErrPartialResource is returned by a detector when complete source
-	// information for a Resource is unavailable or the source information
-	// contains invalid values that are omitted from the returned Resource.
-	ErrPartialResource = errors.New("partial resource")
-)
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
 
 // Detector detects OpenTelemetry resource information.
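The instrument.go and sync*64.go hunks above introduce `HistogramOption` and the advisory `WithExplicitBucketBoundaries` option. A short usage sketch; the meter name, instrument name, and boundaries are made up for illustration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")
	// The boundaries are advisory: an SDK may honour or ignore them.
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.005, 0.025, 0.1, 0.5, 1, 5),
	)
	if err != nil {
		panic(err)
	}
	hist.Record(context.Background(), 0.042)
}
```

Because the option is only a hint, code written against the API stays valid even on implementations that pick their own buckets.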
type Detector interface { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index a847c50622..e29ae563a6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -28,16 +28,14 @@ import ( const ( // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. - resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" + resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials // svcNameKey is the environment variable name that Service Name information will be read from. svcNameKey = "OTEL_SERVICE_NAME" ) -var ( - // errMissingValue is returned when a resource value is missing. - errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) -) +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) // fromEnv is a Detector that implements the Detector and collects // resources from environment. This Detector is included as a @@ -91,7 +89,7 @@ func constructOTResources(s string) (*Resource, error) { continue } key := strings.TrimSpace(k) - val, err := url.QueryUnescape(strings.TrimSpace(v)) + val, err := url.PathUnescape(strings.TrimSpace(v)) if err != nil { // Retain original value if decoding fails, otherwise it will be // an empty string. diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 84e1c58560..0cbd559739 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { osDescription = osDescriptionProvider } -type osTypeDetector struct{} -type osDescriptionDetector struct{} +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) // Detect returns a *Resource that describes the operating system type the // service is running on. @@ -56,7 +58,6 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // service is running on. 
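The env.go hunk above swaps `url.QueryUnescape` for `url.PathUnescape` when decoding `OTEL_RESOURCE_ATTRIBUTES` values. The observable difference is the handling of `+`, which query decoding turns into a space; a quick standalone demonstration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// QueryUnescape applies form decoding, so '+' becomes a space;
	// PathUnescape leaves it alone, so values such as semver build
	// metadata survive intact.
	q, _ := url.QueryUnescape("1.2.3+build.4") // "1.2.3 build.4"
	p, _ := url.PathUnescape("1.2.3+build.4")  // "1.2.3+build.4"
	fmt.Println(q, "|", p)
}
```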
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { description, err := osDescription() - if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index e67ff29e26..ecdd11dd76 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -25,14 +25,16 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) -type pidProvider func() int -type executablePathProvider func() (string, error) -type commandArgsProvider func() []string -type ownerProvider func() (*user.User, error) -type runtimeNameProvider func() string -type runtimeVersionProvider func() string -type runtimeOSProvider func() string -type runtimeArchProvider func() string +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) var ( defaultPidProvider pidProvider = os.Getpid @@ -108,14 +110,16 @@ func setUserProviders(ownerProvider ownerProvider) { owner = ownerProvider } -type processPIDDetector struct{} -type processExecutableNameDetector struct{} -type processExecutablePathDetector struct{} -type processCommandArgsDetector struct{} -type processOwnerDetector struct{} -type processRuntimeNameDetector struct{} -type processRuntimeVersionDetector struct{} -type processRuntimeDescriptionDetector struct{} +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 0a018c14de..7d46c4b48e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -25,6 +25,8 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/noop" ) const ( @@ -73,6 +75,8 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} { // TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to // instrumentation so it can trace operational flow through a system. type TracerProvider struct { + embedded.TracerProvider + mu sync.Mutex namedTracer map[instrumentation.Scope]*tracer spanProcessors atomic.Pointer[spanProcessorStates] @@ -139,7 +143,7 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). if p.isShutdown.Load() { - return trace.NewNoopTracerProvider().Tracer(name, opts...) + return noop.NewTracerProvider().Tracer(name, opts...) } c := trace.NewTracerConfig(opts...) 
if name == "" { @@ -157,7 +161,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran // after the first check above but before we acquired the mutex. if p.isShutdown.Load() { - return trace.NewNoopTracerProvider().Tracer(name, opts...), true + return noop.NewTracerProvider().Tracer(name, opts...), true } t, ok := p.namedTracer[is] if !ok { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index 5ee9715d27..a7bc125b9e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -158,9 +158,9 @@ func NeverSample() Sampler { return alwaysOffSampler{} } -// ParentBased returns a composite sampler which behaves differently, +// ParentBased returns a sampler decorator which behaves differently, // based on the parent of the span. If the span has no parent, -// the root(Sampler) is used to make sampling decision. If the span has +// the decorated sampler is used to make sampling decision. If the span has // a parent, depending on whether the parent is remote and whether it // is sampled, one of the following samplers will apply: // - remoteParentSampled(Sampler) (default: AlwaysOn) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 37cdd4a694..36dbf67764 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -32,6 +32,7 @@ import ( "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // ReadOnlySpan allows reading information from the data structure underlying a @@ -108,6 +109,8 @@ type ReadWriteSpan interface { // recordingSpan is an implementation of the OpenTelemetry Span API // representing the individual component of a trace that is sampled. type recordingSpan struct { + embedded.Span + // mu protects the contents of this span. mu sync.Mutex @@ -158,8 +161,10 @@ type recordingSpan struct { tracer *tracer } -var _ ReadWriteSpan = (*recordingSpan)(nil) -var _ runtimeTracer = (*recordingSpan)(nil) +var ( + _ ReadWriteSpan = (*recordingSpan)(nil) + _ runtimeTracer = (*recordingSpan)(nil) +) // SpanContext returns the SpanContext of this span. func (s *recordingSpan) SpanContext() trace.SpanContext { @@ -772,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { // that wraps a SpanContext. It performs no operations other than to return // the wrapped SpanContext or TracerProvider that created it. type nonRecordingSpan struct { + embedded.Span + // tracer is the SDK tracer that created this span. 
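The reworded `ParentBased` comment above describes a decorator: the wrapped sampler decides only for root spans, and children follow their parent's decision. A sketch of a typical construction (the 10% ratio is an arbitrary example):

```go
package main

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Roots are sampled at 10%; child spans inherit the parent's decision.
	// The remote/local parent samplers keep their defaults here:
	// sampled parents -> AlwaysOn, unsampled parents -> AlwaysOff.
	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.10))
	fmt.Println(sampler.Description())
}
```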
tracer *tracer sc trace.SpanContext diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 85a71227f3..301e1a7abc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -20,9 +20,12 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) type tracer struct { + embedded.Tracer + provider *TracerProvider instrumentationScope instrumentation.Scope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index bfe73de9c4..ae8eae8e8b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -162,6 +162,7 @@ func (s spanSnapshot) Resource() *resource.Resource { return s.resource } func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { return s.instrumentationScope } + func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 72d2cb09f7..422d4c964b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.19.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 0000000000..19c394c69b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,338 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. 
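Several hunks in this patch add `embedded.TracerProvider`, `embedded.Tracer`, and `embedded.Span` fields. These embedded types are the compile-time guard introduced around otel v1.20 that lets the trace interfaces grow without breaking third-party implementations. A hedged sketch of what an outside implementation now looks like (names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
)

// myTracer embeds embedded.Tracer so that methods added to trace.Tracer in
// future minor releases fall through to the embedded type instead of
// breaking this build.
type myTracer struct {
	embedded.Tracer
}

func (t myTracer) Start(ctx context.Context, name string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
	fmt.Println("start:", name)
	// Illustrative only: hand back whatever span is already in the context
	// (a no-op span if none); a real tracer would create a new one.
	return ctx, trace.SpanFromContext(ctx)
}

var _ trace.Tracer = myTracer{}

func main() {
	myTracer{}.Start(context.Background(), "demo")
}
```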
+type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. +func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. 
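To make the behaviour of `NetAttributesFromHTTPRequest` and `hostIPNamePort` concrete, here is a small sketch driving them through the v1.12.0 wrapper that this patch adds further below; the expected output in the comments follows from the logic above:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	req := httptest.NewRequest("GET", "http://example.com:8080/ping", nil)
	req.RemoteAddr = "192.0.2.1:54321"

	for _, kv := range semconv.NetAttributesFromHTTPRequest("tcp", req) {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
	// Expected, per the rules above:
	//   net.transport = ip_tcp
	//   net.peer.ip   = 192.0.2.1
	//   net.peer.port = 54321
	//   net.host.name = example.com
	//   net.host.port = 8080
}
```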
+func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. 
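`HTTPClientAttributesFromHTTPRequest` above temporarily strips the URL's userinfo so credentials never land in the `http.url` attribute, then restores it so the caller's request is untouched. The trick in isolation:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://user:secret@example.com/q?x=1")

	userinfo := u.User
	u.User = nil
	fmt.Println(u.String()) // https://example.com/q?x=1 -> safe to record

	u.User = userinfo      // restore; the original URL is unchanged
	fmt.Println(u.String()) // https://user:secret@example.com/q?x=1
}
```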
+func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values := request.Header["X-Forwarded-For"]; len(values) > 0 { + addr := values[0] + if i := strings.Index(addr, ","); i > 0 { + addr = addr[:i] + } + attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + category := code / 100 + if spanKind == trace.SpanKindServer && category == 4 { + return codes.Unset, "" + } + return spanCode, "" +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. 
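The span-kind rule above (4xx is excluded for SERVER spans) is easy to see with the v1.12.0 wrappers this patch introduces:

```go
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// A 404 on the server side is not a server fault: status stays Unset.
	code, msg := semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindServer)
	fmt.Println(code, msg) // Unset

	// The same 404 seen by a client marks the span as Error.
	code, msg = semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindClient)
	fmt.Println(code, msg) // Error
}
```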
+func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go new file mode 100644 index 0000000000..181fcc9c52 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.12.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go new file mode 100644 index 0000000000..d689270943 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go new file mode 100644 index 0000000000..4b4f3cbaf0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" +) + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) + +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + return sc.NetAttributesFromHTTPRequest(network, request) +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.EndUserAttributesFromHTTPRequest(request) +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.HTTPClientAttributesFromHTTPRequest(request) +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. 
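A hedged sketch of wiring the v1.12.0 helpers above into an instrumented `http.Handler`; the tracer name, route, and handler are illustrative, not part of the vendored code:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

// instrument starts a SERVER span per request and attaches the http.*
// attributes derived by the semconv helpers.
func instrument(route string, next http.Handler) http.Handler {
	tracer := otel.Tracer("example/http")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx, span := tracer.Start(r.Context(), route,
			trace.WithSpanKind(trace.SpanKindServer),
			trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest("", route, r)...),
		)
		defer span.End()
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	http.Handle("/ping", instrument("/ping", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})))
	_ = http.ListenAndServe(":8080", nil)
}
```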
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + return sc.HTTPAttributesFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go new file mode 100644 index 0000000000..b2155676f4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go @@ -0,0 +1,1042 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.brands). + BrowserBrandsKey = attribute.Key("browser.brands") + // The platform on which the browser is running + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.platform). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in the + // [os.type and os.name attributes](./os.md). However, for consistency, the values + // in the `browser.platform` attribute should capture the exact value that the + // user agent provides. 
+ BrowserPlatformKey = attribute.Key("browser.platform") + // Full user-agent string provided by the browser + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` + // API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- + // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- + // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- + // us/global-infrastructure/geographies/), [Google Cloud + // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. 
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. 
+	DeviceModelNameKey = attribute.Key("device.model.name")
+	// The name of the device manufacturer
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+	// The name of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+	// general.md#source-code-attributes)
+	// span attributes).
+
+	// For some cloud providers, the above definition is ambiguous. The following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud providers/products:
+
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+	// The unique ID of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the full ID
+	// at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+
+	// The exact value to use for `faas.id` depends on the cloud provider:
+
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+	// namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+	// aliases.html)
+	// with the resolved function version, as the same runtime instance may be
+	// invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+	// resource-names)
+	// * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+	// us/rest/api/resources/resources/get-by-id) of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.We
+	// b/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function app can
+	// host multiple functions that would usually share
+	// a TracerProvider.
+	FaaSIDKey = attribute.Key("faas.id")
+	// The immutable version of the function being executed.
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name + // (`container.name`). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. 
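The process attributes above map directly onto values a Go program can read at startup. A sketch only, with standard-library calls standing in for real process introspection:

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	exe, _ := os.Executable() // full path of the running binary
	attrs := []attribute.KeyValue{
		semconv.ProcessPIDKey.Int(os.Getpid()),
		semconv.ProcessExecutablePathKey.String(exe),
		// All arguments, including the command itself, per the note above.
		semconv.ProcessCommandArgsKey.StringSlice(os.Args),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```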
+const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. 
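A resource carrying the service identity described above might be assembled as follows; the values are placeholders and the `sdk/resource` package is assumed, not part of this diff:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	// service.namespace + service.name + service.instance.id must be
	// globally unique, per the note above.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("shoppingcart"),
		semconv.ServiceNamespaceKey.String("Shop"),
		semconv.ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
	)
	fmt.Println(res)
}
```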
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go new file mode 100644 index 0000000000..2f2a019e43 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go new file mode 100644 index 0000000000..047d8e95cc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go @@ -0,0 +1,1704 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec + // .md#id) uniquely identifies the event. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m + // d#source-1) identifies the context in which an event happened. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + // The [version of the CloudEvents specification](https://github.com/cloudevents/s + // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp + // ec.md#type) contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. + // md#subject) of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Required: Required, if applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". In case there are multiple layers that could be considered for database + // name (e.g. Oracle instance name and schema name), the database name to be used + // is the more specific layer (e.g. Oracle schema name). + DBNameKey = attribute.Key("db.name") + // The database statement being executed. 
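For the CloudEvents attributes defined above, a producer or consumer span might carry values like the following; this sketch uses only the `attribute` package, with example values taken from the doc comments:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.CloudeventsEventIDKey.String("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSourceKey.String("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersionKey.String("1.0"),
		semconv.CloudeventsEventTypeKey.String("com.example.object.deleted.v2"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```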
+ // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = 
DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. 
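A database client span built from these attributes could look like the following sketch; the tracer wiring via `trace.NewNoopTracerProvider` is illustrative only:

```go
package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := trace.NewNoopTracerProvider().Tracer("example")
	_, span := tracer.Start(context.Background(), "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL, // enum member defined above
			semconv.DBNameKey.String("customers"),
			semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
			semconv.DBOperationKey.String("SELECT"),
		),
	)
	defer span.End()
}
```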
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. 
The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#recording-an-exception). + + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. 
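The exception attributes above are normally attached to a span event rather than the span itself; `RecordError` from the trace API does that, and `exception.escaped` can be supplied when the error is still in flight at span end. A sketch, with the tracer wiring again being illustrative only:

```go
package main

import (
	"context"
	"errors"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := trace.NewNoopTracerProvider().Tracer("example")
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	if err := errors.New("division by zero"); err != nil {
		// RecordError emits an exception event; the escaped flag is set here
		// because the check happens just before the span ends, as the note
		// above recommends.
		span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscapedKey.Bool(true)))
	}
}
```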
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. 
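On the client side of a function invocation, the outgoing-FaaS attributes above combine with the provider enum members. A sketch of the attribute set for an AWS invocation, using the example values from the doc comments:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.FaaSTriggerHTTP,
		semconv.FaaSInvokedNameKey.String("my-function"),
		semconv.FaaSInvokedProviderAWS,
		semconv.FaaSInvokedRegionKey.String("eu-central-1"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```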
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. 
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. 
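Putting the network, peer, and end-user attributes together, a client span for a call to a remote service could carry something like this sketch (placeholder values throughout):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.NetTransportTCP,
		semconv.NetPeerNameKey.String("example.com"),
		semconv.NetPeerPortKey.Int(443),
		semconv.PeerServiceKey.String("AuthTokenCache"),
		semconv.EnduserIDKey.String("username"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```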
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header + // should also be reported, see note. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + // Note: When the header is present but empty the attribute SHOULD be set to the + // empty string. Note that this is a valid situation that is expected in certain + // cases, according the aforementioned [section of RFC + // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not + // set the attribute MUST NOT be set. + HTTPHostKey = attribute.Key("http.host") + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // Required: If and only if one was received/sent. 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User- + // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the + // client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") + // The ordinal number of request re-sending attempt. + // + // Type: int + // Required: If and only if a request was retried. + // Stability: stable + // Examples: 3 + HTTPRetryCountKey = attribute.Key("http.retry_count") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). 
It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would + // identify the network-level peer, which may be a proxy. + + // This attribute should be set when a source of information different + // from the one used for `net.peer.ip`, is available even if that other + // source just confirms the same value as `net.peer.ip`. + // Rationale: For `net.peer.ip`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.peer.ip` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. 
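A server-side HTTP span combining the general HTTP attributes with the server-specific ones above might be started as in this sketch; the route, target, and status code are placeholders, and the status is set late because it is typically known only once the handler finishes:

```go
package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := trace.NewNoopTracerProvider().Tracer("example")
	_, span := tracer.Start(context.Background(), "GET /users/:userID?",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.HTTPMethodKey.String("GET"),
			semconv.HTTPTargetKey.String("/users/42"),
			semconv.HTTPSchemeKey.String("https"),
			semconv.HTTPFlavorHTTP11,
			semconv.HTTPRouteKey.String("/users/:userID?"),
		),
	)
	// Record the response status once the handler has completed.
	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
	span.End()
}
```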
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+	// The value of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+	// ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+	// The value of the `Limit` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+	// The value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+	// The value of the `IndexName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+	// The value of the `Select` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// DynamoDB.CreateTable
+const (
+	// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+	// number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+	// The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+	// number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+	// The value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+	// The number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+	// The value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+	// The value of the `Segment` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+	// The value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+	// The value of the `Count` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+	// The value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+	// The JSON-serialized value of each item in the `AttributeDefinitions` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+	// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+	// request field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+	// A string identifying the messaging system.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+	// The message destination name. This might be equal to the span name but is
+	// required nevertheless.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	MessagingDestinationKey = attribute.Key("messaging.destination")
+	// The kind of message destination.
+	//
+	// Type: Enum
+	// Required: Required only if the message destination is either a `queue` or
+	// `topic`.
+	// Stability: stable
+	MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
+	// A boolean that is true if the message destination is temporary.
+	//
+	// Type: boolean
+	// Required: If missing, it is assumed to be false.
+	// Stability: stable
+	MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
+	// The name of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'AMQP', 'MQTT'
+	MessagingProtocolKey = attribute.Key("messaging.protocol")
+	// The version of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.9.1'
+	MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
+	// Connection string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'tibjmsnaming://localhost:7222',
+	// 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
+	MessagingURLKey = attribute.Key("messaging.url")
+	// A value used by the messaging system as an identifier for the message,
+	// represented as a string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message_id")
+	// The [conversation ID](#conversations) identifying the conversation to which the
+	// message belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'MyConversationID'
+	MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
+	// The (uncompressed) size of the message payload in bytes. Also use this
+	// attribute if it is unknown whether the compressed or uncompressed payload size
+	// is reported.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2738
+	MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
+	// The compressed size of the message payload in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2048
+	MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
+)
+
+var (
+	// A message sent to a queue
+	MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+	// A message sent to a topic
+	MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+	// A string identifying the kind of message consumption as defined in the
+	// [Operation names](#operation-names) section above. If the operation is "send",
+	// this attribute MUST NOT be set, since the operation can be inferred from the
+	// span kind in that case.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingOperationKey = attribute.Key("messaging.operation")
+	// The identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
+	// present, or only `messaging.kafka.consumer_group`. For brokers, such as
+	// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+	// message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
+)
+
+var (
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// Attributes for RabbitMQ
+const (
+	// RabbitMQ message routing key.
+	//
+	// Type: string
+	// Required: Unless it is empty.
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+	// Message keys in Kafka are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message_id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
+	// Name of the Kafka Consumer Group that is handling the message.
Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // The unique identifier for each client. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + // Type of message. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") + // The secondary classifier of message besides topic. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. 
See below for a list of well-known + // identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
+const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPC received/sent message. +const ( + // Whether this is a received or sent message. + // + // Type: Enum + // Required: No + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + // MUST be calculated as two different counters starting from `1` one for sent + // messages and one for received message. + // + // Type: int + // Required: No + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + // Compressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + // Uncompressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index cb3efbb9ad..3aadc66cf7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index ab0346f966..440f3d7565 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -62,5 +62,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. 
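The semconv constants added above are ordinary `attribute.Key` values with typed helpers, so instrumentation attaches them to spans through the standard attribute API. A minimal usage sketch, not part of this diff; the tracer name and the semconv import path version are assumptions for illustration:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		semconv "go.opentelemetry.io/otel/semconv/v1.10.0" // version in the path is assumed
	)

	func handleRequest(ctx context.Context) {
		// "example" is an arbitrary instrumentation scope name.
		ctx, span := otel.Tracer("example").Start(ctx, "HTTP GET")
		defer span.End()

		span.SetAttributes(
			semconv.HTTPStatusCodeKey.Int(200),               // typed helper on attribute.Key
			semconv.HTTPUserAgentKey.String("libwww/2.17b3"), // matches the documented example value
			semconv.HTTPFlavorHTTP11,                         // enum vars are ready-made KeyValues
		)
		_ = ctx // the returned context carries the span to downstream calls
	}

The enum-style vars such as HTTPFlavorHTTP11 are pre-bound KeyValues, which keeps callers from recording values outside the specification's allowed set.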
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/trace/embedded"
+
+	type TracerProvider struct {
+		embedded.TracerProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/trace"
+
+	type TracerProvider struct {
+		trace.TracerProvider
+		// ...
+	}
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+	import "go.opentelemetry.io/otel/trace/noop"
+
+	type TracerProvider struct {
+		noop.TracerProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
 */
 package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 0000000000..898db5a754
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 7cf6c7f3ef..c125491cae 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -19,16 +19,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { } // noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{} +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption } // noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{} +type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 0000000000..7f485543c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ trace.TracerProvider = TracerProvider{}
+	_ trace.Tracer         = Tracer{}
+	_ trace.Span           = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording, return it directly.
+			return ctx, span
+		}
+		// Otherwise, return the span context wrapped in a non-recording span.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+		span = Span{}
+	}
+	return trace.ContextWithSpan(ctx, span), span
+}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+	embedded.Span
+
+	sc trace.SpanContext
+}
+
+// SpanContext returns the span context this Span carries; it is empty unless
+// the Span wraps a propagated parent span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider. +func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 4aa94f79f4..26a4b2260e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +49,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. @@ -71,8 +74,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -486,8 +498,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -518,8 +537,15 @@ type Tracer interface { // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). 
// -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index ca68a82e5f..d1e47ca2fa 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -28,9 +28,9 @@ const ( // based on the W3C Trace Context specification, see // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -40,9 +40,10 @@ const ( ) var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) + withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) + valueRe = regexp.MustCompile(`^` + valueFormat + `$`) + memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ) type member struct { @@ -51,10 +52,19 @@ type member struct { } func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { + if len(key) > 256 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } - if !valueRe.MatchString(value) { + if !noTenantKeyRe.MatchString(key) { + if !withTenantKeyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + atIndex := strings.LastIndex(key, "@") + if atIndex > 241 || len(key)-1-atIndex > 14 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + } + if len(value) > 256 || !valueRe.MatchString(value) { return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) } return member{Key: key, Value: value}, nil @@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { func parseMember(m string) (member, error) { matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + if len(matches) != 3 { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + result, e := newMember(matches[1], matches[2]) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, 
nil } // String encodes member into a string compliant with the W3C Trace Context diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ad64e19967..e2f743585d 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.19.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 7d21276924..3c153c9d6f 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,13 +14,12 @@ module-sets: stable-v1: - version: v1.19.0 + version: v1.21.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough @@ -35,14 +34,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.42.0 + version: v0.44.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/example/view - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus diff --git a/vendor/go.uber.org/goleak/.golangci.yml b/vendor/go.uber.org/goleak/.golangci.yml new file mode 100644 index 0000000000..f84e6da8bc --- /dev/null +++ b/vendor/go.uber.org/goleak/.golangci.yml @@ -0,0 +1,28 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + enable: + - gofumpt + - nolintlint + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - niliness + - reflectvaluecompare + - sortslice + - unusedwrite + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false diff --git a/vendor/go.uber.org/goleak/CHANGELOG.md b/vendor/go.uber.org/goleak/CHANGELOG.md index 530f0a573f..5cd3f88a58 100644 --- a/vendor/go.uber.org/goleak/CHANGELOG.md +++ b/vendor/go.uber.org/goleak/CHANGELOG.md @@ -4,6 +4,21 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.3.0] +### Fixed +- Built-in ignores now match function names more accurately. + They will no longer ignore stacks because of file names + that look similar to function names. (#112) +### Added +- Add an `IgnoreAnyFunction` option to ignore stack traces + that have the provided function anywhere in the stack. 
(#113) +- Ignore `testing.runFuzzing` and `testing.runFuzzTests` alongside + other already-ignored test functions (`testing.RunTests`, etc). (#105) +### Changed +- Miscellaneous CI-related fixes. (#103, #108, #114) + +[1.3.0]: https://github.com/uber-go/goleak/compare/v1.2.1...v1.3.0 + ## [1.2.1] ### Changed - Drop golang/x/lint dependency. @@ -56,4 +71,4 @@ Thanks to @denis-tingajkin for their contributions to this release. ## [0.10.0] - Initial release. -[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD \ No newline at end of file +[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD diff --git a/vendor/go.uber.org/goleak/Makefile b/vendor/go.uber.org/goleak/Makefile index 8dbf722656..eb7154af3a 100644 --- a/vendor/go.uber.org/goleak/Makefile +++ b/vendor/go.uber.org/goleak/Makefile @@ -1,19 +1,26 @@ -export GOBIN ?= $(shell pwd)/bin +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -REVIVE = $(GOBIN)/revive +export GOBIN = $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) +GO_FILES = $(shell find . \ + -path '*/.*' -prune -o \ + '(' -type f -a -name '*.go' ')' -print) + +# Additional test flags. +TEST_FLAGS ?= + +.PHONY: all +all: lint build test + +.PHONY: lint +lint: golangci-lint tidy-lint .PHONY: build build: go build ./... -.PHONY: install -install: - go mod download - .PHONY: test test: go test -v -race ./... @@ -24,18 +31,15 @@ cover: go test -race -coverprofile=cover.out -coverpkg=./... ./... go tool cover -html=cover.out -o cover.html -$(REVIVE): - cd tools && go install github.com/mgechev/revive +.PHONY: golangci-lint +golangci-lint: + golangci-lint run -.PHONY: lint -lint: $(REVIVE) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @go vet ./... 2>&1 | tee -a lint.log - @echo "Checking lint..." - @$(REVIVE) -set_exit_status ./... 2>&1 | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e '^vendor/' -e '^Makefile' | tee -a lint.log - @[ ! -s lint.log ] +.PHONY: tidy +tidy: + go mod tidy + +.PHONY: tidy-lint +tidy-lint: + go mod tidy + git diff --exit-code -- go.mod go.sum diff --git a/vendor/go.uber.org/goleak/README.md b/vendor/go.uber.org/goleak/README.md index a545b5e779..de3d7d51df 100644 --- a/vendor/go.uber.org/goleak/README.md +++ b/vendor/go.uber.org/goleak/README.md @@ -67,8 +67,8 @@ No breaking changes will be made to exported APIs before 2.0. 
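To make the goleak changes in this upgrade concrete, here is a minimal sketch of how a test suite would consume them, including the IgnoreAnyFunction option added in v1.3.0. This is not from this repository; the package and ignored function names are hypothetical:

	package app_test

	import (
		"testing"

		"go.uber.org/goleak"
	)

	func TestMain(m *testing.M) {
		// VerifyTestMain checks for leaks once, after all tests have run,
		// so it is safe to combine with t.Parallel, unlike VerifyNone.
		goleak.VerifyTestMain(m,
			// IgnoreAnyFunction matches the function anywhere in the stack,
			// not just the top frame; this name is a hypothetical worker.
			goleak.IgnoreAnyFunction("example.com/app.(*workerPool).run"),
		)
	}

	func TestHandler(t *testing.T) {
		defer goleak.VerifyNone(t) // per-test check; avoid pairing with t.Parallel
		// ...
	}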
[doc-img]: https://godoc.org/go.uber.org/goleak?status.svg [doc]: https://godoc.org/go.uber.org/goleak -[ci-img]: https://github.com/uber-go/goleak/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/goleak/actions/workflows/go.yml +[ci-img]: https://github.com/uber-go/goleak/actions/workflows/ci.yml/badge.svg +[ci]: https://github.com/uber-go/goleak/actions/workflows/ci.yml [cov-img]: https://codecov.io/gh/uber-go/goleak/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/goleak [release]: https://go.dev/doc/devel/release#policy diff --git a/vendor/go.uber.org/goleak/glide.yaml b/vendor/go.uber.org/goleak/glide.yaml deleted file mode 100644 index c6e7a00a06..0000000000 --- a/vendor/go.uber.org/goleak/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/goleak -import: [] -testImport: -- package: github.com/stretchr/testify - version: ^1.1.4 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/goleak/internal/stack/scan.go b/vendor/go.uber.org/goleak/internal/stack/scan.go new file mode 100644 index 0000000000..4b7ac8423e --- /dev/null +++ b/vendor/go.uber.org/goleak/internal/stack/scan.go @@ -0,0 +1,56 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package stack + +import ( + "bufio" + "io" +) + +// scanner provides a bufio.Scanner the ability to Unscan, +// which allows the current token to be read again +// after the next Scan. +type scanner struct { + *bufio.Scanner + + unscanned bool +} + +func newScanner(r io.Reader) *scanner { + return &scanner{Scanner: bufio.NewScanner(r)} +} + +func (s *scanner) Scan() bool { + if s.unscanned { + s.unscanned = false + return true + } + return s.Scanner.Scan() +} + +// Unscan stops the scanner from advancing its position +// for the next Scan. +// +// Bytes and Text will return the same token after next Scan +// that they do right now. +func (s *scanner) Unscan() { + s.unscanned = true +} diff --git a/vendor/go.uber.org/goleak/internal/stack/stacks.go b/vendor/go.uber.org/goleak/internal/stack/stacks.go index 94f82e4c0d..241a9b8448 100644 --- a/vendor/go.uber.org/goleak/internal/stack/stacks.go +++ b/vendor/go.uber.org/goleak/internal/stack/stacks.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,8 +21,8 @@ package stack import ( - "bufio" "bytes" + "errors" "fmt" "io" "runtime" @@ -34,10 +34,17 @@ const _defaultBufferSize = 64 * 1024 // 64 KiB // Stack represents a single Goroutine's stack. type Stack struct { - id int - state string + id int + state string // e.g. 'running', 'chan receive' + + // The first function on the stack. firstFunction string - fullStack *bytes.Buffer + + // A set of all functions in the stack, + allFunctions map[string]struct{} + + // Full, raw stack trace. + fullStack string } // ID returns the goroutine ID. @@ -52,7 +59,7 @@ func (s Stack) State() string { // Full returns the full stack trace for this goroutine. func (s Stack) Full() string { - return s.fullStack.String() + return s.fullStack } // FirstFunction returns the name of the first function on the stack. @@ -60,6 +67,13 @@ func (s Stack) FirstFunction() string { return s.firstFunction } +// HasFunction reports whether the stack has the given function +// anywhere in it. +func (s Stack) HasFunction(name string) bool { + _, ok := s.allFunctions[name] + return ok +} + func (s Stack) String() string { return fmt.Sprintf( "Goroutine %v in state %v, with %v on top of the stack:\n%s", @@ -67,45 +81,147 @@ func (s Stack) String() string { } func getStacks(all bool) []Stack { - var stacks []Stack + trace := getStackBuffer(all) + stacks, err := newStackParser(bytes.NewReader(trace)).Parse() + if err != nil { + // Well-formed stack traces should never fail to parse. + // If they do, it's a bug in this package. + // Panic so we can fix it. + panic(fmt.Sprintf("Failed to parse stack trace: %v\n%s", err, trace)) + } + return stacks +} + +type stackParser struct { + scan *scanner + stacks []Stack + errors []error +} + +func newStackParser(r io.Reader) *stackParser { + return &stackParser{ + scan: newScanner(r), + } +} + +func (p *stackParser) Parse() ([]Stack, error) { + for p.scan.Scan() { + line := p.scan.Text() + + // If we see the goroutine header, start a new stack. + if strings.HasPrefix(line, "goroutine ") { + stack, err := p.parseStack(line) + if err != nil { + p.errors = append(p.errors, err) + continue + } + p.stacks = append(p.stacks, stack) + } + } + + p.errors = append(p.errors, p.scan.Err()) + return p.stacks, errors.Join(p.errors...) +} + +// parseStack parses a single stack trace from the given scanner. +// line is the first line of the stack trace, which should look like: +// +// goroutine 123 [runnable]: +func (p *stackParser) parseStack(line string) (Stack, error) { + id, state, err := parseGoStackHeader(line) + if err != nil { + return Stack{}, fmt.Errorf("parse header: %w", err) + } - var curStack *Stack - stackReader := bufio.NewReader(bytes.NewReader(getStackBuffer(all))) - for { - line, err := stackReader.ReadString('\n') - if err == io.EOF { + // Read the rest of the stack trace. + var ( + firstFunction string + fullStack bytes.Buffer + ) + funcs := make(map[string]struct{}) + for p.scan.Scan() { + line := p.scan.Text() + if strings.HasPrefix(line, "goroutine ") { + // If we see the goroutine header, + // it's the end of this stack. + // Unscan so the next Scan sees the same line. + p.scan.Unscan() break } - if err != nil { - // We're reading using bytes.NewReader which should never fail. 
-			panic("bufio.NewReader failed on a fixed string")
+
+		fullStack.WriteString(line)
+		fullStack.WriteByte('\n') // scanner trims the newline
+
+		if len(line) == 0 {
+			// Empty line usually marks the end of the stack
+			// but we don't want to have to rely on that.
+			// Just skip it.
+			continue
 		}
 
-		// If we see the goroutine header, start a new stack.
-		isFirstLine := false
-		if strings.HasPrefix(line, "goroutine ") {
-			// flush any previous stack
-			if curStack != nil {
-				stacks = append(stacks, *curStack)
+
+		funcName, creator, err := parseFuncName(line)
+		if err != nil {
+			return Stack{}, fmt.Errorf("parse function: %w", err)
+		}
+		if !creator {
+			// A function is part of a goroutine's stack
+			// only if it's not a "created by" function.
+			//
+			// The creator function is part of a different stack.
+			// We don't care about it right now.
+			funcs[funcName] = struct{}{}
+			if firstFunction == "" {
+				firstFunction = funcName
 			}
-			id, goState := parseGoStackHeader(line)
-			curStack = &Stack{
-				id:        id,
-				state:     goState,
-				fullStack: &bytes.Buffer{},
+
+		}
+
+		// The function name is followed by a line in the form:
+		//
+		//	example.com/path/to/package/file.go:123 +0x123
+		//
+		// We don't care about the position so we can skip this line.
+		if p.scan.Scan() {
+			// Be defensive:
+			// Skip the line only if it starts with a tab.
+			bs := p.scan.Bytes()
+			if len(bs) > 0 && bs[0] == '\t' {
+				fullStack.Write(bs)
+				fullStack.WriteByte('\n')
+			} else {
+				// Put it back and let the next iteration handle it
+				// if it doesn't start with a tab.
+				p.scan.Unscan()
 			}
-			isFirstLine = true
 		}
-		curStack.fullStack.WriteString(line)
-		if !isFirstLine && curStack.firstFunction == "" {
-			curStack.firstFunction = parseFirstFunc(line)
+
+		if creator {
+			// The "created by" line is the last line of the stack.
+			// We can stop parsing now.
+			//
+			// Note that if tracebackancestors=N is set,
+			// there may also be a traceback of the creator function
+			// following the "created by" line,
+			// but it should not be considered part of this stack.
+			// e.g.,
+			//
+			//	created by testing.(*T).Run in goroutine 1
+			//		/usr/lib/go/src/testing/testing.go:1648 +0x3ad
+			//	[originating from goroutine 1]:
+			//	testing.(*T).Run(...)
+			//		/usr/lib/go/src/testing/testing.go:1649 +0x3ad
+			//
+			break
 		}
 	}
-	if curStack != nil {
-		stacks = append(stacks, *curStack)
-	}
-	return stacks
+	return Stack{
+		id:            id,
+		state:         state,
+		firstFunction: firstFunction,
+		allFunctions:  funcs,
+		fullStack:     fullStack.String(),
+	}, nil
 }
 
 // All returns the stacks for all running goroutines.
@@ -127,29 +243,56 @@ func getStackBuffer(all bool) []byte {
 	}
 }
 
-func parseFirstFunc(line string) string {
-	line = strings.TrimSpace(line)
-	if idx := strings.LastIndex(line, "("); idx > 0 {
-		return line[:idx]
+// Parses a single function from the given line.
+// The line is in one of these formats:
+//
+//	example.com/path/to/package.funcName(args...)
+//	example.com/path/to/package.(*typeName).funcName(args...)
+//	created by example.com/path/to/package.funcName
+//	created by example.com/path/to/package.funcName in goroutine [...]
+//
+// Also reports whether the line was a "created by" line.
+func parseFuncName(line string) (name string, creator bool, err error) {
+	if after, ok := strings.CutPrefix(line, "created by "); ok {
+		// The function name is the part after "created by "
+		// and before " in goroutine [...]".
+ idx := strings.Index(after, " in goroutine") + if idx >= 0 { + after = after[:idx] + } + name = after + creator = true + } else if idx := strings.LastIndexByte(line, '('); idx >= 0 { + // The function name is the part before the last '('. + name = line[:idx] } - panic(fmt.Sprintf("function calls missing parents: %q", line)) + + if name == "" { + return "", false, fmt.Errorf("no function found: %q", line) + } + + return name, creator, nil } // parseGoStackHeader parses a stack header that looks like: // goroutine 643 [runnable]:\n // And returns the goroutine ID, and the state. -func parseGoStackHeader(line string) (goroutineID int, state string) { - line = strings.TrimSuffix(line, ":\n") +func parseGoStackHeader(line string) (goroutineID int, state string, err error) { + // The scanner will have already trimmed the "\n", + // but we'll guard against it just in case. + // + // Trimming them separately makes them both optional. + line = strings.TrimSuffix(strings.TrimSuffix(line, ":"), "\n") parts := strings.SplitN(line, " ", 3) if len(parts) != 3 { - panic(fmt.Sprintf("unexpected stack header format: %q", line)) + return 0, "", fmt.Errorf("unexpected format: %q", line) } id, err := strconv.Atoi(parts[1]) if err != nil { - panic(fmt.Sprintf("failed to parse goroutine ID: %v in line %q", parts[1], line)) + return 0, "", fmt.Errorf("bad goroutine ID %q in line %q", parts[1], line) } state = strings.TrimSuffix(strings.TrimPrefix(parts[2], "["), "]") - return id, state + return id, state, nil } diff --git a/vendor/go.uber.org/goleak/leaks.go b/vendor/go.uber.org/goleak/leaks.go index ee122b7464..cc206f1815 100644 --- a/vendor/go.uber.org/goleak/leaks.go +++ b/vendor/go.uber.org/goleak/leaks.go @@ -82,6 +82,12 @@ type testHelper interface { // tests by doing: // // defer VerifyNone(t) +// +// VerifyNone is currently incompatible with t.Parallel because it cannot +// associate specific goroutines with specific tests. Thus, non-leaking +// goroutines from other tests running in parallel could fail this check. +// If you need to run tests in parallel, use [VerifyTestMain] instead, +// which will verify that no leaking goroutines exist after ALL tests finish. func VerifyNone(t TestingT, options ...Option) { opts := buildOpts(options...) var cleanup func(int) diff --git a/vendor/go.uber.org/goleak/options.go b/vendor/go.uber.org/goleak/options.go index d2d473b71e..53fc0a1dec 100644 --- a/vendor/go.uber.org/goleak/options.go +++ b/vendor/go.uber.org/goleak/options.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -67,6 +67,22 @@ func IgnoreTopFunction(f string) Option { }) } +// IgnoreAnyFunction ignores goroutines where the specified function +// is present anywhere in the stack. +// +// The function name must be fully qualified, e.g., +// +// go.uber.org/goleak.IgnoreAnyFunction +// +// For methods, the fully qualified form looks like: +// +// go.uber.org/goleak.(*MyType).MyMethod +func IgnoreAnyFunction(f string) Option { + return addFilter(func(s stack.Stack) bool { + return s.HasFunction(f) + }) +} + // Cleanup sets up a cleanup function that will be executed at the // end of the leak check. 
// When passed to [VerifyTestMain], the exit code passed to cleanupFunc @@ -151,8 +167,12 @@ func isTestStack(s stack.Stack) bool { // Since go1.7, a separate goroutine is started to wait for signals. // T.Parallel is for parallel tests, which are blocked until all serial // tests have run with T.Parallel at the top of the stack. + // testing.runFuzzTests is for fuzz testing, it's blocked until the test + // function with all seed corpus have run. + // testing.runFuzzing is for fuzz testing, it's blocked until a failing + // input is found. switch s.FirstFunction() { - case "testing.RunTests", "testing.(*T).Run", "testing.(*T).Parallel": + case "testing.RunTests", "testing.(*T).Run", "testing.(*T).Parallel", "testing.runFuzzing", "testing.runFuzzTests": // In pre1.7 and post-1.7, background goroutines started by the testing // package are blocked waiting on a channel. return strings.HasPrefix(s.State(), "chan receive") @@ -163,7 +183,7 @@ func isTestStack(s stack.Stack) bool { func isSyscallStack(s stack.Stack) bool { // Typically runs in the background when code uses CGo: // https://github.com/golang/go/issues/16714 - return s.FirstFunction() == "runtime.goexit" && strings.HasPrefix(s.State(), "syscall") + return s.HasFunction("runtime.goexit") && strings.HasPrefix(s.State(), "syscall") } func isStdLibStack(s stack.Stack) bool { @@ -174,5 +194,5 @@ func isStdLibStack(s stack.Stack) bool { } // Using signal.Notify will start a runtime goroutine. - return strings.Contains(s.Full(), "runtime.ensureSigM") + return s.HasFunction("runtime.ensureSigM") } diff --git a/vendor/go.uber.org/goleak/tracestack_new.go b/vendor/go.uber.org/goleak/tracestack_new.go index d12ffd8404..4fc6cefcea 100644 --- a/vendor/go.uber.org/goleak/tracestack_new.go +++ b/vendor/go.uber.org/goleak/tracestack_new.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2021-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,12 +23,8 @@ package goleak -import ( - "strings" - - "go.uber.org/goleak/internal/stack" -) +import "go.uber.org/goleak/internal/stack" func isTraceStack(s stack.Stack) bool { - return strings.Contains(s.Full(), "runtime.ReadTrace") + return s.HasFunction("runtime.ReadTrace") } diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdfec..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. 
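The goleak changes above add IgnoreAnyFunction and document that VerifyNone cannot be combined with t.Parallel. A minimal sketch of the documented alternative, VerifyTestMain, which checks once after all tests (including parallel ones) have finished; the ignored function name is hypothetical:

```go
package app_test

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain verifies leaks exactly once, after every test has run,
// so it is safe to combine with t.Parallel.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// IgnoreAnyFunction matches the function anywhere in a
		// goroutine's stack, not only the top frame.
		goleak.IgnoreAnyFunction("example.com/app.(*Worker).loop"),
	)
}
```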
-var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e39..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b419..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3cb1..8f6c7f493f 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7d2..7db1d1293a 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -12,8 +12,8 @@ import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index c1292b30f3..cd375fbc3c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -11,6 +11,7 @@ import ( "fmt" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -20,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 6cbd3de83e..bd79efc1aa 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -29,6 +29,7 @@ import ( "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. 
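The execabs deletions above reflect that, since Go 1.19, os/exec itself refuses to run a bare command name that resolves to the current directory, surfacing exec.ErrDot instead; x/tools can therefore switch back to plain os/exec, as the import changes that follow do. A hedged sketch of that stdlib behavior, using the opt-in pattern from the os/exec documentation:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("prog") // bare name, resolved via PATH

	// Since Go 1.19, cmd.Err is set to an error wrapping exec.ErrDot
	// when the lookup would have picked ./prog from the current directory.
	if errors.Is(cmd.Err, exec.ErrDot) {
		fmt.Println("refusing to run an executable from the current directory")
		// To deliberately allow it, clear the error: cmd.Err = nil
		return
	}
	_ = cmd.Run()
}
```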
@@ -432,12 +433,6 @@ func init() {
 	packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
 		return p.(*Package).depsErrors
 	}
-	packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner {
-		return config.(*Config).gocmdRunner
-	}
-	packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {
-		config.(*Config).gocmdRunner = runner
-	}
 	packagesinternal.SetModFile = func(config interface{}, value string) {
 		config.(*Config).modFile = value
 	}
@@ -1024,6 +1019,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		Selections: make(map[*ast.SelectorExpr]*types.Selection),
 	}
 	typeparams.InitInstanceInfo(lpkg.TypesInfo)
+	versions.InitFileVersions(lpkg.TypesInfo)
 	lpkg.TypesSizes = ld.sizes

 	importer := importerFunc(func(path string) (*types.Package, error) {
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index c27b91f8c7..55312522dc 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -13,6 +13,7 @@ import (
 	"io"
 	"log"
 	"os"
+	"os/exec"
 	"reflect"
 	"regexp"
 	"runtime"
@@ -21,8 +22,6 @@ import (
 	"sync"
 	"time"

-	exec "golang.org/x/sys/execabs"
-
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/keys"
 	"golang.org/x/tools/internal/event/label"
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index d9950b1f0b..44719de173 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,10 +5,6 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal

-import (
-	"golang.org/x/tools/internal/gocommand"
-)
-
 var GetForTest = func(p interface{}) string { return "" }
 var GetDepsErrors = func(p interface{}) []*PackageError { return nil }

@@ -18,10 +14,6 @@ type PackageError struct {
 	Err  string // the error itself
 }

-var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
-
-var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
-
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
 var ForTest int    // must be set as a LoadMode to call GetForTest
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000000..bbabcd22e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.Minor[.Patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) 
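On Go 1.22 and later this fork is bypassed: the versions_go122.go wrappers added later in this patch delegate to the new go/version standard-library package, which implements the same ordering as compare and lang above. A small sketch of that ordering:

```go
package main

import (
	"fmt"
	"go/version" // standard library, Go 1.22+
)

func main() {
	// The plain language version sorts before its release candidates,
	// which in turn sort before the tagged releases.
	fmt.Println(version.Compare("go1.21", "go1.21rc1"))   // -1
	fmt.Println(version.Compare("go1.21rc1", "go1.21.0")) // -1

	// Lang truncates a version to its language version.
	fmt.Println(version.Lang("go1.21.2")) // "go1.21"
}
```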
+func cmpInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000000..562eef21fa
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"go/types"
+)
+
+// GoVersion returns the Go version of the type package.
+// It returns zero if no version can be determined.
+func GoVersion(pkg *types.Package) string {
+	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
+	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
+		return pkg.GoVersion()
+	}
+	return ""
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
new file mode 100644
index 0000000000..a7b79207ae
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions always reports a file's Go version as the
+// zero version at this Go version.
+func FileVersions(info *types.Info, file *ast.File) string { return "" }
+
+// InitFileVersions is a noop at this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 0000000000..7b9ba89a82
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions maps a file to the file's semantic Go version.
+// The reported version is the zero version if a version cannot be determined.
+func FileVersions(info *types.Info, file *ast.File) string {
+	return info.FileVersions[file]
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+	info.FileVersions = make(map[*ast.File]string)
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions_go121.go
new file mode 100644
index 0000000000..cf4a7d0360
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions_go121.go
@@ -0,0 +1,49 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go new file mode 100644 index 0000000000..c1c1814b28 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions_go122.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/version" +) + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { return version.Lang(x) } + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return version.Compare(x, y) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return version.IsValid(x) } diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 84f9302dcf..3356fa97b8 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -55,6 +55,8 @@ type DialSettings struct { EnableDirectPathXds bool EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool + UniverseDomain string + DefaultUniverseDomain string // Google API system parameters. 
For more information please read:
 	// https://cloud.google.com/apis/docs/system-parameters
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 62a5a2386b..a130609e5d 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "0.150.0"
+const Version = "0.153.0"
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index b2b249eec6..3fdee095c1 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -126,6 +126,22 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) {
 	copy(o.DefaultScopes, w)
 }

+// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain.
+//
+// It should only be used internally by generated clients.
+//
+// This is similar to the public WithUniverseDomain, but allows us to determine whether the user has
+// overridden the default universe.
+func WithDefaultUniverseDomain(ud string) option.ClientOption {
+	return withDefaultUniverseDomain(ud)
+}
+
+type withDefaultUniverseDomain string
+
+func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) {
+	o.DefaultUniverseDomain = string(w)
+}
+
 // EnableJwtWithScope returns a ClientOption that specifies if scope can be used
 // with self-signed JWT.
 func EnableJwtWithScope() option.ClientOption {
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index b2085a1949..c882c1eb48 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -343,3 +343,16 @@ func (w *withCreds) Apply(o *internal.DialSettings) {
 func WithCredentials(creds *google.Credentials) ClientOption {
 	return (*withCreds)(creds)
 }
+
+// WithUniverseDomain returns a ClientOption that sets the universe domain.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
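A hedged sketch of passing the new option to a generated client such as the storage service touched later in this patch; the domain value shown is the public default ("googleapis.com") and is purely illustrative:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// WithUniverseDomain overrides the default universe domain that the
	// generated client sets via WithDefaultUniverseDomain.
	svc, err := storage.NewService(ctx, option.WithUniverseDomain("googleapis.com"))
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}
```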
+func WithUniverseDomain(ud string) ClientOption { + return withUniverseDomain(ud) +} + +type withUniverseDomain string + +func (w withUniverseDomain) Apply(o *internal.DialSettings) { + o.UniverseDomain = string(w) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 5481a74cbb..6c89799d54 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"39383633393336373936373236333033393737\"", + "etag": "\"38383938373230313033363637363637353533\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -215,7 +215,7 @@ "type": "string" } }, - "path": "b/{bucket}/anywhereCache", + "path": "b/{bucket}/anywhereCaches", "response": { "$ref": "AnywhereCaches" }, @@ -3799,7 +3799,7 @@ } } }, - "revision": "20231028", + "revision": "20231117", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 90320fcc0b..c4331c04de 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -3593,7 +3593,7 @@ func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCache") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3670,7 +3670,7 @@ func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCache // "type": "string" // } // }, - // "path": "b/{bucket}/anywhereCache", + // "path": "b/{bucket}/anywhereCaches", // "response": { // "$ref": "AnywhereCaches" // }, diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go new file mode 100644 index 0000000000..3674914f70 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -0,0 +1,177 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var (
+	// ReallyCrash controls the behavior of HandleCrash and defaults to
+	// true. It's exposed so components can optionally set to false
+	// to restore prior behavior. This flag is mostly used for tests to validate
+	// crash conditions.
+	ReallyCrash = true
+)
+
+// PanicHandlers is a list of functions which will be invoked when a panic happens.
+var PanicHandlers = []func(interface{}){logPanic}
+
+// HandleCrash simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down goroutines gracefully.
+func HandleCrash(additionalHandlers ...func(interface{})) {
+	if r := recover(); r != nil {
+		for _, fn := range PanicHandlers {
+			fn(r)
+		}
+		for _, fn := range additionalHandlers {
+			fn(r)
+		}
+		if ReallyCrash {
+			// Actually proceed to panic.
+			panic(r)
+		}
+	}
+}
+
+// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
+func logPanic(r interface{}) {
+	if r == http.ErrAbortHandler {
+		// honor the http.ErrAbortHandler sentinel panic value:
+		// ErrAbortHandler is a sentinel panic value to abort a handler.
+		// While any panic from ServeHTTP aborts the response to the client,
+		// panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
+		return
+	}
+
+	// Same as stdlib http server code. Manually allocate stack trace buffer size
+	// to prevent excessively large logs
+	const size = 64 << 10
+	stacktrace := make([]byte, size)
+	stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
+	if _, ok := r.(string); ok {
+		klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
+	} else {
+		klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
+	}
+}
+
+// ErrorHandlers is a list of functions which will be invoked when a nonreturnable
+// error occurs.
+// TODO(lavalamp): for testability, this and the below HandleError function
+// should be packaged up into a testable and reusable object.
+var ErrorHandlers = []func(error){
+	logError,
+	(&rudimentaryErrorBackoff{
+		lastErrorTime: time.Now(),
+		// 1ms was the number folks were able to stomach as a global rate limit.
+		// If you need to log errors more than 1000 times a second you
+		// should probably consider fixing your code instead. :)
+		minPeriod: time.Millisecond,
+	}).OnError,
+}
+
+// HandleError is a method to invoke when a non-user facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error - the default behavior is to log but the
+// errors may be sent to a remote server for analysis.
+func HandleError(err error) { + // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead + if err == nil { + return + } + + for _, fn := range ErrorHandlers { + fn(err) + } +} + +// logError prints an error with the call stack of the location it was reported +func logError(err error) { + klog.ErrorDepth(2, err) +} + +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock + r.lastErrorTimeLock.Lock() + d := now.Sub(r.lastErrorTime) + r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) +} + +// GetCaller returns the caller of the function that calls it. +func GetCaller() string { + var pc [1]uintptr + runtime.Callers(3, pc[:]) + f := runtime.FuncForPC(pc[0]) + if f == nil { + return "Unable to find caller" + } + return f.Name() +} + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%s", + r, + *err, + stacktrace) + } +} + +// Must panics on non-nil errors. Useful to handling programmer level errors. +func Must(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
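Looking back at the apimachinery runtime helpers vendored above, a short illustrative sketch of how HandleCrash and HandleError are typically used together; doWork is a hypothetical task:

```go
package main

import (
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// doWork is a hypothetical task that may fail or panic.
func doWork() error { return errors.New("transient failure") }

func main() {
	// HandleCrash runs the registered PanicHandlers (logging by default)
	// and then, because ReallyCrash defaults to true, re-panics, so the
	// crash is still fatal but leaves a stack trace behind.
	defer utilruntime.HandleCrash()

	if err := doWork(); err != nil {
		// HandleError is for errors that cannot be returned to a caller;
		// it logs them and applies a rudimentary 1ms backoff between calls.
		utilruntime.HandleError(err)
	}
}
```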
diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100644 index 0000000000..2c9488a5fb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - wojtek-t + - jayunit100 diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go new file mode 100644 index 0000000000..99d3d8e239 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -0,0 +1,211 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides abstractions for registering which metrics +// to record. +package metrics + +import ( + "context" + "net/url" + "sync" + "time" +) + +var registerMetrics sync.Once + +// DurationMetric is a measurement of some amount of time. +type DurationMetric interface { + Observe(duration time.Duration) +} + +// ExpiryMetric sets some time of expiry. If nil, assume not relevant. +type ExpiryMetric interface { + Set(expiry *time.Time) +} + +// LatencyMetric observes client latency partitioned by verb and url. +type LatencyMetric interface { + Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) +} + +type ResolverLatencyMetric interface { + Observe(ctx context.Context, host string, latency time.Duration) +} + +// SizeMetric observes client response size partitioned by verb and host. +type SizeMetric interface { + Observe(ctx context.Context, verb string, host string, size float64) +} + +// ResultMetric counts response codes partitioned by method and host. +type ResultMetric interface { + Increment(ctx context.Context, code string, method string, host string) +} + +// CallsMetric counts calls that take place for a specific exec plugin. +type CallsMetric interface { + // Increment increments a counter per exitCode and callStatus. + Increment(exitCode int, callStatus string) +} + +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. +type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + +// TransportCacheMetric shows the number of entries in the internal transport cache +type TransportCacheMetric interface { + Observe(value int) +} + +// TransportCreateCallsMetric counts the number of times a transport is created +// partitioned by the result of the cache: hit, miss, uncacheable +type TransportCreateCallsMetric interface { + Increment(result string) +} + +var ( + // ClientCertExpiry is the expiry time of a client certificate + ClientCertExpiry ExpiryMetric = noopExpiry{} + // ClientCertRotationAge is the age of a certificate that has just been rotated. + ClientCertRotationAge DurationMetric = noopDuration{} + // RequestLatency is the latency metric that rest clients will update. 
+ RequestLatency LatencyMetric = noopLatency{} + // ResolverLatency is the latency metric that DNS resolver will update + ResolverLatency ResolverLatencyMetric = noopResolverLatency{} + // RequestSize is the request size metric that rest clients will update. + RequestSize SizeMetric = noopSize{} + // ResponseSize is the response size metric that rest clients will update. + ResponseSize SizeMetric = noopSize{} + // RateLimiterLatency is the client side rate limiter latency metric. + RateLimiterLatency LatencyMetric = noopLatency{} + // RequestResult is the result metric that rest clients will update. + RequestResult ResultMetric = noopResult{} + // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by + // exit code and call status. + ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} + // TransportCacheEntries is the metric that tracks the number of entries in the + // internal transport cache. + TransportCacheEntries TransportCacheMetric = noopTransportCache{} + // TransportCreateCalls is the metric that counts the number of times a new transport + // is created + TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{} +) + +// RegisterOpts contains all the metrics to register. Metrics may be nil. +type RegisterOpts struct { + ClientCertExpiry ExpiryMetric + ClientCertRotationAge DurationMetric + RequestLatency LatencyMetric + ResolverLatency ResolverLatencyMetric + RequestSize SizeMetric + ResponseSize SizeMetric + RateLimiterLatency LatencyMetric + RequestResult ResultMetric + ExecPluginCalls CallsMetric + RequestRetry RetryMetric + TransportCacheEntries TransportCacheMetric + TransportCreateCalls TransportCreateCallsMetric +} + +// Register registers metrics for the rest client to use. This can +// only be called once. 
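+//
+// A minimal wiring sketch (latencyAdapter and histVec are hypothetical,
+// prometheus-backed stand-ins, not part of this package):
+//
+//	type latencyAdapter struct{ m *prometheus.HistogramVec }
+//
+//	func (l latencyAdapter) Observe(_ context.Context, verb string, u url.URL, d time.Duration) {
+//		l.m.WithLabelValues(verb, u.Host).Observe(d.Seconds())
+//	}
+//
+//	metrics.Register(metrics.RegisterOpts{RequestLatency: latencyAdapter{m: histVec}})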
+func Register(opts RegisterOpts) { + registerMetrics.Do(func() { + if opts.ClientCertExpiry != nil { + ClientCertExpiry = opts.ClientCertExpiry + } + if opts.ClientCertRotationAge != nil { + ClientCertRotationAge = opts.ClientCertRotationAge + } + if opts.RequestLatency != nil { + RequestLatency = opts.RequestLatency + } + if opts.ResolverLatency != nil { + ResolverLatency = opts.ResolverLatency + } + if opts.RequestSize != nil { + RequestSize = opts.RequestSize + } + if opts.ResponseSize != nil { + ResponseSize = opts.ResponseSize + } + if opts.RateLimiterLatency != nil { + RateLimiterLatency = opts.RateLimiterLatency + } + if opts.RequestResult != nil { + RequestResult = opts.RequestResult + } + if opts.ExecPluginCalls != nil { + ExecPluginCalls = opts.ExecPluginCalls + } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } + if opts.TransportCacheEntries != nil { + TransportCacheEntries = opts.TransportCacheEntries + } + if opts.TransportCreateCalls != nil { + TransportCreateCalls = opts.TransportCreateCalls + } + }) +} + +type noopDuration struct{} + +func (noopDuration) Observe(time.Duration) {} + +type noopExpiry struct{} + +func (noopExpiry) Set(*time.Time) {} + +type noopLatency struct{} + +func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {} + +type noopResolverLatency struct{} + +func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) { +} + +type noopSize struct{} + +func (noopSize) Observe(context.Context, string, string, float64) {} + +type noopResult struct{} + +func (noopResult) Increment(context.Context, string, string, string) {} + +type noopCalls struct{} + +func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} + +type noopTransportCache struct{} + +func (noopTransportCache) Observe(int) {} + +type noopTransportCreateCalls struct{} + +func (noopTransportCreateCalls) Increment(string) {} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go new file mode 100644 index 0000000000..efda7c197f --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -0,0 +1,238 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "math" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} + +// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. 
It has +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +func DefaultControllerRateLimiter() RateLimiter { + return NewMaxOfRateLimiter( + NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) + &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) +} + +// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type BucketRateLimiter struct { + *rate.Limiter +} + +var _ RateLimiter = &BucketRateLimiter{} + +func (r *BucketRateLimiter) When(item interface{}) time.Duration { + return r.Limiter.Reserve().Delay() +} + +func (r *BucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *BucketRateLimiter) Forget(item interface{}) { +} + +// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// dealing with max failures and expiration are up to the caller +type ItemExponentialFailureRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + baseDelay time.Duration + maxDelay time.Duration +} + +var _ RateLimiter = &ItemExponentialFailureRateLimiter{} + +func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { + return &ItemExponentialFailureRateLimiter{ + failures: map[interface{}]int{}, + baseDelay: baseDelay, + maxDelay: maxDelay, + } +} + +func DefaultItemBasedRateLimiter() RateLimiter { + return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) +} + +func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + exp := r.failures[item] + r.failures[item] = r.failures[item] + 1 + + // The backoff is capped such that 'calculated' value never overflows. 
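+	// Illustration: with the 5ms base delay used by DefaultControllerRateLimiter,
+	// successive failures of one item wait 5ms, 10ms, 20ms, 40ms, ... until the
+	// 1000s maxDelay takes over.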
+ backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) + if backoff > math.MaxInt64 { + return r.maxDelay + } + + calculated := time.Duration(backoff) + if calculated > r.maxDelay { + return r.maxDelay + } + + return calculated +} + +func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type ItemFastSlowRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + maxFastAttempts int + fastDelay time.Duration + slowDelay time.Duration +} + +var _ RateLimiter = &ItemFastSlowRateLimiter{} + +func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { + return &ItemFastSlowRateLimiter{ + failures: map[interface{}]int{}, + fastDelay: fastDelay, + slowDelay: slowDelay, + maxFastAttempts: maxFastAttempts, + } +} + +func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + r.failures[item] = r.failures[item] + 1 + + if r.failures[item] <= r.maxFastAttempts { + return r.fastDelay + } + + return r.slowDelay +} + +func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// MaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. 
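+//
+// DefaultControllerRateLimiter above is the canonical composition:
+//
+//	NewMaxOfRateLimiter(
+//		NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
+//		&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
+//	)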
+type MaxOfRateLimiter struct { + limiters []RateLimiter +} + +func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { + ret := time.Duration(0) + for _, limiter := range r.limiters { + curr := limiter.When(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { + return &MaxOfRateLimiter{limiters: limiters} +} + +func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { + ret := 0 + for _, limiter := range r.limiters { + curr := limiter.NumRequeues(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func (r *MaxOfRateLimiter) Forget(item interface{}) { + for _, limiter := range r.limiters { + limiter.Forget(item) + } +} + +// WithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type WithMaxWaitRateLimiter struct { + limiter RateLimiter + maxDelay time.Duration +} + +func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { + return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} +} + +func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { + delay := w.limiter.When(item) + if delay > w.maxDelay { + return w.maxDelay + } + + return delay +} + +func (w WithMaxWaitRateLimiter) Forget(item interface{}) { + w.limiter.Forget(item) +} + +func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { + return w.limiter.NumRequeues(item) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go new file mode 100644 index 0000000000..c1df720302 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -0,0 +1,325 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "container/heap" + "sync" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type DelayingInterface interface { + Interface + // AddAfter adds an item to the workqueue after the indicated duration has passed + AddAfter(item interface{}, duration time.Duration) +} + +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type DelayingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue Interface +} + +// NewDelayingQueue constructs a new workqueue with delayed queuing ability. +// NewDelayingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewDelayingQueueWithConfig instead and specify a name. +func NewDelayingQueue() DelayingInterface { + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewWithConfig(QueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) +} + +// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to +// inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. +func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) +} + +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. +func NewNamedDelayingQueue(name string) DelayingInterface { + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) +} + +// NewDelayingQueueWithCustomClock constructs a new named workqueue +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. +func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) +} + +func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { + ret := &delayingType{ + Interface: q, + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name, provider), + } + + go ret.waitingLoop() + return ret +} + +// delayingType wraps an Interface and provides delayed re-enquing +type delayingType struct { + Interface + + // clock tracks time for delayed firing + clock clock.Clock + + // stopCh lets us signal a shutdown to the waiting loop + stopCh chan struct{} + // stopOnce guarantees we only signal shutdown a single time + stopOnce sync.Once + + // heartbeat ensures we wait no more than maxWait before firing + heartbeat clock.Ticker + + // waitingForAddCh is a buffered channel that feeds waitingForAdd + waitingForAddCh chan *waitFor + + // metrics counts the number of retries + metrics retryMetrics +} + +// waitFor holds the data to add and the time it should be added +type waitFor struct { + data t + readyAt time.Time + // index in the priority queue (heap) + index int +} + +// waitForPriorityQueue implements a priority queue for waitFor items. +// +// waitForPriorityQueue implements heap.Interface. The item occurring next in +// time (i.e., the item with the smallest readyAt) is at the root (index 0). +// Peek returns this minimum item at index 0. Pop returns the minimum item after +// it has been removed from the queue and placed at index Len()-1 by +// container/heap. Push adds an item at index Len(), and container/heap +// percolates it into the correct location. 
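+//
+// Intended access pattern, always via container/heap as waitingLoop does
+// (sketch only; readyTime is a placeholder):
+//
+//	pq := &waitForPriorityQueue{}
+//	heap.Init(pq)
+//	heap.Push(pq, &waitFor{data: "x", readyAt: readyTime})
+//	next := pq.Peek().(*waitFor) // earliest readyAt, not removed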
+type waitForPriorityQueue []*waitFor + +func (pq waitForPriorityQueue) Len() int { + return len(pq) +} +func (pq waitForPriorityQueue) Less(i, j int) bool { + return pq[i].readyAt.Before(pq[j].readyAt) +} +func (pq waitForPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the queue. Push should not be called directly; instead, +// use `heap.Push`. +func (pq *waitForPriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*waitFor) + item.index = n + *pq = append(*pq, item) +} + +// Pop removes an item from the queue. Pop should not be called directly; +// instead, use `heap.Pop`. +func (pq *waitForPriorityQueue) Pop() interface{} { + n := len(*pq) + item := (*pq)[n-1] + item.index = -1 + *pq = (*pq)[0:(n - 1)] + return item +} + +// Peek returns the item at the beginning of the queue, without removing the +// item or otherwise mutating the queue. It is safe to call directly. +func (pq waitForPriorityQueue) Peek() interface{} { + return pq[0] +} + +// ShutDown stops the queue. After the queue drains, the returned shutdown bool +// on Get() will be true. This method may be invoked more than once. +func (q *delayingType) ShutDown() { + q.stopOnce.Do(func() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() + }) +} + +// AddAfter adds the given item to the work queue after the given delay +func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { + // don't add if we're already shutting down + if q.ShuttingDown() { + return + } + + q.metrics.retry() + + // immediately add things with no delay + if duration <= 0 { + q.Add(item) + return + } + + select { + case <-q.stopCh: + // unblock if ShutDown() is called + case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + } +} + +// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. +// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an +// expired item sitting for more than 10 seconds. +const maxWait = 10 * time.Second + +// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
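+//
+// For context, the producer side that feeds this loop is simply (sketch):
+//
+//	q := workqueue.NewDelayingQueue()
+//	q.AddAfter(item, 5*time.Second) // item surfaces via Get() after ~5s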
+func (q *delayingType) waitingLoop() { + defer utilruntime.HandleCrash() + + // Make a placeholder channel to use when there are no items in our list + never := make(<-chan time.Time) + + // Make a timer that expires when the item at the head of the waiting queue is ready + var nextReadyAtTimer clock.Timer + + waitingForQueue := &waitForPriorityQueue{} + heap.Init(waitingForQueue) + + waitingEntryByData := map[t]*waitFor{} + + for { + if q.Interface.ShuttingDown() { + return + } + + now := q.clock.Now() + + // Add ready entries + for waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + if entry.readyAt.After(now) { + break + } + + entry = heap.Pop(waitingForQueue).(*waitFor) + q.Add(entry.data) + delete(waitingEntryByData, entry.data) + } + + // Set up a wait for the first item's readyAt (if one exists) + nextReadyAt := never + if waitingForQueue.Len() > 0 { + if nextReadyAtTimer != nil { + nextReadyAtTimer.Stop() + } + entry := waitingForQueue.Peek().(*waitFor) + nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) + nextReadyAt = nextReadyAtTimer.C() + } + + select { + case <-q.stopCh: + return + + case <-q.heartbeat.C(): + // continue the loop, which will add ready items + + case <-nextReadyAt: + // continue the loop, which will add ready items + + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + + drained := false + for !drained { + select { + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + default: + drained = true + } + } + } + } +} + +// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue +func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { + // if the entry already exists, update the time only if it would cause the item to be queued sooner + existing, exists := knownEntries[entry.data] + if exists { + if existing.readyAt.After(entry.readyAt) { + existing.readyAt = entry.readyAt + heap.Fix(q, existing.index) + } + + return + } + + heap.Push(q, entry) + knownEntries[entry.data] = entry +} diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go new file mode 100644 index 0000000000..8555aa95fe --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package workqueue provides a simple queue that supports the following +// features: +// - Fair: items processed in the order in which they are added. +// - Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// - Multiple consumers and producers. 
In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// - Shutdown notifications. +package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go new file mode 100644 index 0000000000..f012ccc554 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -0,0 +1,266 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type queueMetrics interface { + add(item t) + get(item t) + done(item t) + updateUnfinishedWork() +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Inc() + Dec() +} + +// SettableGaugeMetric represents a single numerical value that can arbitrarily go up +// and down. (Separate from GaugeMetric to preserve backwards compatibility.) +type SettableGaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +// HistogramMetric counts individual observations. +type HistogramMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Observe(float64) {} + +// defaultQueueMetrics expects the caller to lock before setting any metrics. +type defaultQueueMetrics struct { + clock clock.Clock + + // current depth of a workqueue + depth GaugeMetric + // total number of adds handled by a workqueue + adds CounterMetric + // how long an item stays in a workqueue + latency HistogramMetric + // how long processing an item from a workqueue takes + workDuration HistogramMetric + addTimes map[t]time.Time + processingStartTimes map[t]time.Time + + // how long have current threads been working? 
+ unfinishedWorkSeconds SettableGaugeMetric + longestRunningProcessor SettableGaugeMetric +} + +func (m *defaultQueueMetrics) add(item t) { + if m == nil { + return + } + + m.adds.Inc() + m.depth.Inc() + if _, exists := m.addTimes[item]; !exists { + m.addTimes[item] = m.clock.Now() + } +} + +func (m *defaultQueueMetrics) get(item t) { + if m == nil { + return + } + + m.depth.Dec() + m.processingStartTimes[item] = m.clock.Now() + if startTime, exists := m.addTimes[item]; exists { + m.latency.Observe(m.sinceInSeconds(startTime)) + delete(m.addTimes, item) + } +} + +func (m *defaultQueueMetrics) done(item t) { + if m == nil { + return + } + + if startTime, exists := m.processingStartTimes[item]; exists { + m.workDuration.Observe(m.sinceInSeconds(startTime)) + delete(m.processingStartTimes, item) + } +} + +func (m *defaultQueueMetrics) updateUnfinishedWork() { + // Note that a summary metric would be better for this, but prometheus + // doesn't seem to have non-hacky ways to reset the summary metrics. + var total float64 + var oldest float64 + for _, t := range m.processingStartTimes { + age := m.sinceInSeconds(t) + total += age + if age > oldest { + oldest = age + } + } + m.unfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest) +} + +type noMetrics struct{} + +func (noMetrics) add(item t) {} +func (noMetrics) get(item t) {} +func (noMetrics) done(item t) {} +func (noMetrics) updateUnfinishedWork() {} + +// Gets the time since the specified start in seconds. +func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { + return m.clock.Since(start).Seconds() +} + +type retryMetrics interface { + retry() +} + +type defaultRetryMetrics struct { + retries CounterMetric +} + +func (m *defaultRetryMetrics) retry() { + if m == nil { + return + } + + m.retries.Inc() +} + +// MetricsProvider generates various metrics used by the queue. 
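+//
+// Queues use no-op metrics until a provider is registered, e.g. (sketch,
+// with a hypothetical prometheus-backed implementation):
+//
+//	workqueue.SetProvider(prometheusMetricsProvider{})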
+type MetricsProvider interface { + NewDepthMetric(name string) GaugeMetric + NewAddsMetric(name string) CounterMetric + NewLatencyMetric(name string) HistogramMetric + NewWorkDurationMetric(name string) HistogramMetric + NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric + NewRetriesMetric(name string) CounterMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + +var globalMetricsFactory = queueMetricsFactory{ + metricsProvider: noopMetricsProvider{}, +} + +type queueMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { + mp := f.metricsProvider + if len(name) == 0 || mp == (noopMetricsProvider{}) { + return noMetrics{} + } + return &defaultQueueMetrics{ + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, + } +} + +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + + return &defaultRetryMetrics{ + retries: provider.NewRetriesMetric(name), + } +} + +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. +func SetProvider(metricsProvider MetricsProvider) { + globalMetricsFactory.setProvider(metricsProvider) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go new file mode 100644 index 0000000000..366bf20a31 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workqueue + +import ( + "context" + "sync" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +type DoWorkPieceFunc func(piece int) + +type options struct { + chunkSize int +} + +type Options func(*options) + +// WithChunkSize allows to set chunks of work items to the workers, rather than +// processing one by one. +// It is recommended to use this option if the number of pieces significantly +// higher than the number of workers and the work done for each item is small. +func WithChunkSize(c int) func(*options) { + return func(o *options) { + o.chunkSize = c + } +} + +// ParallelizeUntil is a framework that allows for parallelizing N +// independent pieces of work until done or the context is canceled. +func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) { + if pieces == 0 { + return + } + o := options{} + for _, opt := range opts { + opt(&o) + } + chunkSize := o.chunkSize + if chunkSize < 1 { + chunkSize = 1 + } + + chunks := ceilDiv(pieces, chunkSize) + toProcess := make(chan int, chunks) + for i := 0; i < chunks; i++ { + toProcess <- i + } + close(toProcess) + + var stop <-chan struct{} + if ctx != nil { + stop = ctx.Done() + } + if chunks < workers { + workers = chunks + } + wg := sync.WaitGroup{} + wg.Add(workers) + for i := 0; i < workers; i++ { + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + for chunk := range toProcess { + start := chunk * chunkSize + end := start + chunkSize + if end > pieces { + end = pieces + } + for p := start; p < end; p++ { + select { + case <-stop: + return + default: + doWorkPiece(p) + } + } + } + }() + } + wg.Wait() +} + +func ceilDiv(a, b int) int { + return (a + b - 1) / b +} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go new file mode 100644 index 0000000000..380c064552 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -0,0 +1,328 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +type Interface interface { + Add(item interface{}) + Len() int + Get() (item interface{}, shutdown bool) + Done(item interface{}) + ShutDown() + ShutDownWithDrain() + ShuttingDown() bool +} + +// QueueConfig specifies optional configurations to customize an Interface. +type QueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. + Clock clock.WithTicker +} + +// New constructs a new work queue (see the package comment). +func New() *Type { + return NewWithConfig(QueueConfig{ + Name: "", + }) +} + +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. 
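+//
+// For example (sketch):
+//
+//	q := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "demo"})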
+func NewWithConfig(config QueueConfig) *Type { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. +func NewNamed(name string) *Type { + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + return newQueue( + config.Clock, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, + ) +} + +func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { + t := &Type{ + clock: c, + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: metrics, + unfinishedWorkUpdatePeriod: updatePeriod, + } + + // Don't start the goroutine for a type of noMetrics so we don't consume + // resources unnecessarily + if _, ok := metrics.(noMetrics); !ok { + go t.updateUnfinishedWorkLoop() + } + + return t +} + +const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond + +// Type is a work queue (see the package comment). +type Type struct { + // queue defines the order in which we will work on items. Every + // element of queue should be in the dirty set and not in the + // processing set. + queue []t + + // dirty defines all of the items that need to be processed. + dirty set + + // Things that are currently being processed are in the processing set. + // These things may be simultaneously in the dirty set. When we finish + // processing something and remove it from this set, we'll check if + // it's in the dirty set, and if so, add it to the queue. + processing set + + cond *sync.Cond + + shuttingDown bool + drain bool + + metrics queueMetrics + + unfinishedWorkUpdatePeriod time.Duration + clock clock.WithTicker +} + +type empty struct{} +type t interface{} +type set map[t]empty + +func (s set) has(item t) bool { + _, exists := s[item] + return exists +} + +func (s set) insert(item t) { + s[item] = empty{} +} + +func (s set) delete(item t) { + delete(s, item) +} + +func (s set) len() int { + return len(s) +} + +// Add marks item as needing processing. +func (q *Type) Add(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if q.shuttingDown { + return + } + if q.dirty.has(item) { + return + } + + q.metrics.add(item) + + q.dirty.insert(item) + if q.processing.has(item) { + return + } + + q.queue = append(q.queue, item) + q.cond.Signal() +} + +// Len returns the current queue length, for informational purposes only. You +// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular +// value, that can't be synchronized properly. +func (q *Type) Len() int { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return len(q.queue) +} + +// Get blocks until it can return an item to be processed. If shutdown = true, +// the caller should end their goroutine. You must call Done with item when you +// have finished processing it. 
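+//
+// The canonical worker loop (sketch; process stands in for the caller's
+// handler):
+//
+//	for {
+//		item, shutdown := q.Get()
+//		if shutdown {
+//			return
+//		}
+//		process(item)
+//		q.Done(item)
+//	}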
+func (q *Type) Get() (item interface{}, shutdown bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + for len(q.queue) == 0 && !q.shuttingDown { + q.cond.Wait() + } + if len(q.queue) == 0 { + // We must be shutting down. + return nil, true + } + + item = q.queue[0] + // The underlying array still exists and reference this object, so the object will not be garbage collected. + q.queue[0] = nil + q.queue = q.queue[1:] + + q.metrics.get(item) + + q.processing.insert(item) + q.dirty.delete(item) + + return item, false +} + +// Done marks item as done processing, and if it has been marked as dirty again +// while it was being processed, it will be re-added to the queue for +// re-processing. +func (q *Type) Done(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.metrics.done(item) + + q.processing.delete(item) + if q.dirty.has(item) { + q.queue = append(q.queue, item) + q.cond.Signal() + } else if q.processing.len() == 0 { + q.cond.Signal() + } +} + +// ShutDown will cause q to ignore all new items added to it and +// immediately instruct the worker goroutines to exit. +func (q *Type) ShutDown() { + q.setDrain(false) + q.shutdown() +} + +// ShutDownWithDrain will cause q to ignore all new items added to it. As soon +// as the worker goroutines have "drained", i.e: finished processing and called +// Done on all existing items in the queue; they will be instructed to exit and +// ShutDownWithDrain will return. Hence: a strict requirement for using this is; +// your workers must ensure that Done is called on all items in the queue once +// the shut down has been initiated, if that is not the case: this will block +// indefinitely. It is, however, safe to call ShutDown after having called +// ShutDownWithDrain, as to force the queue shut down to terminate immediately +// without waiting for the drainage. +func (q *Type) ShutDownWithDrain() { + q.setDrain(true) + q.shutdown() + for q.isProcessing() && q.shouldDrain() { + q.waitForProcessing() + } +} + +// isProcessing indicates if there are still items on the work queue being +// processed. It's used to drain the work queue on an eventual shutdown. +func (q *Type) isProcessing() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.processing.len() != 0 +} + +// waitForProcessing waits for the worker goroutines to finish processing items +// and call Done on them. +func (q *Type) waitForProcessing() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + // Ensure that we do not wait on a queue which is already empty, as that + // could result in waiting for Done to be called on items in an empty queue + // which has already been shut down, which will result in waiting + // indefinitely. 
+ if q.processing.len() == 0 { + return + } + q.cond.Wait() +} + +func (q *Type) setDrain(shouldDrain bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.drain = shouldDrain +} + +func (q *Type) shouldDrain() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.drain +} + +func (q *Type) shutdown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.shuttingDown = true + q.cond.Broadcast() +} + +func (q *Type) ShuttingDown() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + return q.shuttingDown +} + +func (q *Type) updateUnfinishedWorkLoop() { + t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) + defer t.Stop() + for range t.C() { + if !func() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if !q.shuttingDown { + q.metrics.updateUnfinishedWork() + return true + } + return false + + }() { + return + } + } +} diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go new file mode 100644 index 0000000000..3e4016fb04 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import "k8s.io/utils/clock" + +// RateLimitingInterface is an interface that rate limits items being added to the queue. +type RateLimitingInterface interface { + DelayingInterface + + // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok + AddRateLimited(item interface{}) + + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing + // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you + // still have to call `Done` on the queue. + Forget(item interface{}) + + // NumRequeues returns back how many times the item was requeued + NumRequeues(item interface{}) int +} + +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. + +type RateLimitingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue DelayingInterface +} + +// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use +// NewRateLimitingQueueWithConfig instead and specify a name. 
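+//
+// Typical retry pattern (sketch; process stands in for the caller's handler):
+//
+//	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+//
+//	// after handling an item in the worker:
+//	if err := process(item); err != nil {
+//		q.AddRateLimited(item) // requeue with backoff from the rate limiter
+//	} else {
+//		q.Forget(item) // success: stop tracking failures
+//	}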
+func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return &rateLimitingType{ + DelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, + } +} + +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. +func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) +} + +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. +func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) +} + +// rateLimitingType wraps an Interface and provides rateLimited re-enquing +type rateLimitingType struct { + DelayingInterface + + rateLimiter RateLimiter +} + +// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok +func (q *rateLimitingType) AddRateLimited(item interface{}) { + q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +} + +func (q *rateLimitingType) NumRequeues(item interface{}) int { + return q.rateLimiter.NumRequeues(item) +} + +func (q *rateLimitingType) Forget(item interface{}) { + q.rateLimiter.Forget(item) +} diff --git a/vendor/k8s.io/klog/v2/.gitignore b/vendor/k8s.io/klog/v2/.gitignore new file mode 100644 index 0000000000..0aa2002392 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.gitignore @@ -0,0 +1,17 @@ +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Eclipse files +.classpath +.project +.settings/** + +# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA +.idea/ +*.iml + +# Vscode files +.vscode diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 0000000000..0d77d65f06 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/CONTRIBUTING.md b/vendor/k8s.io/klog/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..2641b1f41b --- /dev/null +++ b/vendor/k8s.io/klog/v2/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/v2/LICENSE b/vendor/k8s.io/klog/v2/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/k8s.io/klog/v2/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS new file mode 100644 index 0000000000..a2fe8f351b --- /dev/null +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - harshanarayana + - pohly +approvers: + - dims + - thockin + - serathius +emeritus_approvers: + - brancz + - justinsb + - lavalamp + - piosz + - tallclair diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md new file mode 100644 index 0000000000..d45cbe1720 --- /dev/null +++ b/vendor/k8s.io/klog/v2/README.md @@ -0,0 +1,118 @@ +klog +==== + +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] 
is not itself under development
+
+This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
+
+ * `glog` [presents a lot of gotchas](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
+ * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it.
+ * A long-term goal is to implement a logging interface that allows us to add context, change output format, etc.
+
+Historical context is available here:
+
+ * https://github.com/kubernetes/kubernetes/issues/61006
+ * https://github.com/kubernetes/kubernetes/issues/70264
+ * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
+ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
+
+## Release versioning
+
+Semantic versioning is used in this repository. It contains several Go modules
+with different levels of stability:
+- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags
+- `examples` - no stable API, no tags, no intention to ever stabilize
+
+Exempt from the API stability guarantee are items (packages, functions, etc.)
+which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those
+may still change in incompatible ways or get removed entirely. Experimental
+APIs may only be used in test code; this avoids situations where non-test
+code from two different Kubernetes dependencies depends on incompatible
+releases of klog because an experimental API was changed.
+
+----
+
+How to use klog
+===============
+- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"`
+- Use `klog.InitFlags(nil)` explicitly for initializing global flags, as we no longer use an `init()` method to register the flags
+- You can now use `log_file` instead of `log_dir` for logging to a single file (see `examples/log_file/usage_log_file.go`)
+- If you want to redirect everything logged using klog somewhere else (say, syslog!), you can use the `klog.SetOutput()` method and supply an `io.Writer` (see `examples/set_output/usage_set_output.go`)
+- For more logging conventions, see [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)
+- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog), and the short usage sketch at the end of this README.
+
+**NOTE**: please use a Go version that supports semantic import versioning in modules, ideally Go 1.11.4 or greater.
+
+### Coexisting with klog/v2
+
+See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2.
+
+### Coexisting with glog
+This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](https://kubernetes.slack.com/messages/klog)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
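+
+A minimal usage sketch of the steps above (the messages and verbosity level
+are illustrative only, not part of the klog API):
+
+```go
+package main
+
+import (
+	"flag"
+
+	"k8s.io/klog/v2"
+)
+
+func main() {
+	klog.InitFlags(nil) // registers -v, -logtostderr, -log_file, ... on flag.CommandLine
+	flag.Parse()
+	defer klog.Flush() // flush any buffered log data before exiting
+
+	klog.InfoS("starting up", "version", "v0.0.1")
+	if klog.V(2).Enabled() {
+		klog.InfoS("verbose diagnostics enabled")
+	}
+}
+```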
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+    https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+    Package glog implements logging analogous to the Google-internal
+    C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+    Error, Fatal, plus formatting variants such as Infof. It
+    also provides V-style logging controlled by the -v and
+    -vmodule=file=2 flags.
+
+    Basic examples:
+
+        glog.Info("Prepare to repel boarders")
+
+        glog.Fatalf("Initialization failed: %s", err)
+
+    See the documentation of the V function for an explanation
+    of these examples:
+
+        if glog.V(2) {
+            glog.Info("Starting transaction...")
+        }
+
+        glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/k8s.io/klog/v2/RELEASE.md b/vendor/k8s.io/klog/v2/RELEASE.md
new file mode 100644
index 0000000000..b53eb960ce
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue is opened proposing a new release with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md
new file mode 100644
index 0000000000..2083d44cdf
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY.md
@@ -0,0 +1,22 @@
+# Security Policy
+
+## Security Announcements
+
+Join the [kubernetes-security-announce] group for security and vulnerability announcements.
+
+You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
+
+## Reporting a Vulnerability
+
+Instructions for reporting a vulnerability can be found on the
+[Kubernetes Security and Disclosure Information] page.
+
+## Supported Versions
+
+Information about supported Kubernetes versions can be found on the
+[Kubernetes version and version skew support policy] page on the Kubernetes website.
+ +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS new file mode 100644 index 0000000000..6128a58699 --- /dev/null +++ b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/v2/code-of-conduct.md b/vendor/k8s.io/klog/v2/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 0000000000..005513f2a7 --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,212 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. +// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. + klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. 
+//
+// If set, all log lines will be suppressed from the regular output, and
+// redirected to the logr implementation.
+// Use as:
+//
+//	...
+//	klog.SetLogger(zapr.NewLogger(zapLog))
+//
+// To remove a backing logr implementation, use ClearLogger. Setting an
+// empty logger with SetLogger(logr.Logger{}) does not work.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func SetLogger(logger logr.Logger) {
+	SetLoggerWithOptions(logger)
+}
+
+// SetLoggerWithOptions is a more flexible version of SetLogger. Without
+// additional options, it behaves exactly like SetLogger. By passing
+// ContextualLogger(true) as an option, it can be used to set a logger that then
+// will also get called directly by applications which retrieve it via
+// FromContext, Background, or TODO.
+//
+// Supporting direct calls is recommended because it avoids the overhead of
+// routing log entries through klogr into klog and then into the actual Logger
+// backend.
+func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
+	logging.loggerOptions = loggerOptions{}
+	for _, opt := range opts {
+		opt(&logging.loggerOptions)
+	}
+	logging.logger = &logWriter{
+		Logger:          logger,
+		writeKlogBuffer: logging.loggerOptions.writeKlogBuffer,
+	}
+}
+
+// ContextualLogger determines whether the logger passed to
+// SetLoggerWithOptions may also get called directly. Such a logger cannot rely
+// on verbosity checking in klog.
+func ContextualLogger(enabled bool) LoggerOption {
+	return func(o *loggerOptions) {
+		o.contextualLogger = enabled
+	}
+}
+
+// FlushLogger provides a callback for flushing data buffered by the logger.
+func FlushLogger(flush func()) LoggerOption {
+	return func(o *loggerOptions) {
+		o.flush = flush
+	}
+}
+
+// WriteKlogBuffer sets a callback that will be invoked by klog to write output
+// produced by non-structured log calls like Infof.
+//
+// The buffer will contain exactly the same data that klog normally would write
+// into its own output stream(s). In particular this includes the header, if
+// klog is configured to write one. The callback then can divert that data into
+// its own output streams. The buffer may or may not end in a line break.
+//
+// Without such a callback, klog will call the logger's Info or Error method
+// with just the message string (i.e. no header).
+func WriteKlogBuffer(write func([]byte)) LoggerOption {
+	return func(o *loggerOptions) {
+		o.writeKlogBuffer = write
+	}
+}
+
+// LoggerOption implements the functional parameter paradigm for
+// SetLoggerWithOptions.
+type LoggerOption func(o *loggerOptions)
+
+type loggerOptions struct {
+	contextualLogger bool
+	flush            func()
+	writeKlogBuffer  func([]byte)
+}
+
+// logWriter combines a logger (always set) with a write callback (optional).
+type logWriter struct {
+	Logger
+	writeKlogBuffer func([]byte)
+}
+
+// ClearLogger removes a backing Logger implementation if one was set earlier
+// with SetLogger.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func ClearLogger() {
+	logging.logger = nil
+	logging.loggerOptions = loggerOptions{}
+}
+
+// EnableContextualLogging controls whether contextual logging is enabled.
+// By default it is enabled. When disabled, FromContext avoids looking up
+// the logger in the context and always returns the global logger.
+// LoggerWithValues, LoggerWithName, and NewContext become no-ops
+// and return their input logger or context unchanged. This may be useful
+// to avoid the additional overhead of contextual logging.
+//
+// This must be called during initialization before goroutines are started.
+func EnableContextualLogging(enabled bool) {
+	logging.contextualLoggingEnabled = enabled
+}
+
+// FromContext retrieves a logger set by the caller or, if not set,
+// falls back to the program's global logger (a Logger instance or klog
+// itself).
+func FromContext(ctx context.Context) Logger {
+	if logging.contextualLoggingEnabled {
+		if logger, err := logr.FromContext(ctx); err == nil {
+			return logger
+		}
+	}
+
+	return Background()
+}
+
+// TODO can be used as a last resort by code that has no means of
+// receiving a logger from its caller. FromContext or an explicit logger
+// parameter should be used instead.
+func TODO() Logger {
+	return Background()
+}
+
+// Background retrieves the fallback logger. It should not be called before
+// the program has initialized that logger, and code that could receive a
+// logger via its parameters should do that instead. TODO can be used as a
+// temporary solution for such code.
+func Background() Logger {
+	if logging.loggerOptions.contextualLogger {
+		// Is non-nil because logging.loggerOptions.contextualLogger is
+		// only true if a logger was set.
+		return logging.logger.Logger
+	}
+
+	return klogLogger
+}
+
+// LoggerWithValues returns logger.WithValues(...kv) when
+// contextual logging is enabled, otherwise the logger.
+func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
+	if logging.contextualLoggingEnabled {
+		return logger.WithValues(kv...)
+	}
+	return logger
+}
+
+// LoggerWithName returns logger.WithName(name) when contextual logging is
+// enabled, otherwise the logger.
+func LoggerWithName(logger Logger, name string) Logger {
+	if logging.contextualLoggingEnabled {
+		return logger.WithName(name)
+	}
+	return logger
+}
+
+// NewContext returns logr.NewContext(ctx, logger) when
+// contextual logging is enabled, otherwise ctx.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	if logging.contextualLoggingEnabled {
+		return logr.NewContext(ctx, logger)
+	}
+	return ctx
+}
diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go
new file mode 100644
index 0000000000..320a147728
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/exit.go
@@ -0,0 +1,69 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package klog
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+var (
+
+	// ExitFlushTimeout is the timeout that klog has traditionally used during
+	// calls like Fatal or Exit when flushing log data right before exiting.
+	// Applications that replace those calls and do not have some specific
+	// requirements like "exit immediately" can use this value as the
+	// parameter for FlushAndExit.
+	//
+	// Can be set for testing purposes or to change the application's
+	// default.
+	ExitFlushTimeout = 10 * time.Second
+
+	// OsExit is the function called by FlushAndExit to terminate the program.
+	//
+	// Can be set for testing purposes or to change the application's
+	// default behavior. Note that the function should not simply return
+	// because callers of functions like Fatal will not expect that.
+	OsExit = os.Exit
+)
+
+// FlushAndExit flushes log data for a certain amount of time and then calls
+// os.Exit. Combined with a logging call, it provides a replacement for
+// traditional calls like Fatal or Exit.
+func FlushAndExit(flushTimeout time.Duration, exitCode int) {
+	timeoutFlush(flushTimeout)
+	OsExit(exitCode)
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when klog.Fatal is called from a hook that holds
+// a lock. Flushing also might take too long.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go
new file mode 100644
index 0000000000..63995ca6db
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/format.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-logr/logr"
+)
+
+// Format wraps a value of an arbitrary type and implements fmt.Stringer and
+// logr.Marshaler for it. Stringer returns pretty-printed JSON. MarshalLog
+// returns the original value with a type that has no special methods, in
+// particular no MarshalLog or MarshalJSON.
+//
+// Wrapping values like that is useful when the value has a broken
+// implementation of these special functions (for example, a type which
+// inherits String from TypeMeta, but then doesn't re-implement String) or the
+// implementation produces output that is less readable or unstructured (for
+// example, the generated String functions for Kubernetes API types).
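+//
+// For example (illustrative only; pod is a hypothetical API object):
+// klog.InfoS("pod status", "pod", klog.Format(pod)) logs the pod as
+// pretty-printed, multi-line JSON rather than its String() output.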
+func Format(obj interface{}) interface{} {
+	return formatAny{Object: obj}
+}
+
+type formatAny struct {
+	Object interface{}
+}
+
+func (f formatAny) String() string {
+	var buffer strings.Builder
+	encoder := json.NewEncoder(&buffer)
+	encoder.SetIndent("", " ")
+	if err := encoder.Encode(&f.Object); err != nil {
+		return fmt.Sprintf("error marshaling %T to JSON: %v", f, err)
+	}
+	return buffer.String()
+}
+
+func (f formatAny) MarshalLog() interface{} {
+	// Returning a pointer to a pointer ensures that zapr doesn't find a
+	// fmt.Stringer or logr.Marshaler when it checks the type of the
+	// value. It then falls back to reflection, which dumps the value being
+	// pointed to (JSON doesn't have pointers).
+	ptr := &f.Object
+	return &ptr
+}
+
+var _ fmt.Stringer = formatAny{}
+var _ logr.Marshaler = formatAny{}
diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go
new file mode 100644
index 0000000000..602c3ed9e6
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/imports.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+	"github.com/go-logr/logr"
+)
+
+// The reason for providing these aliases is to allow code to work with logr
+// without directly importing it.
+
+// Logger in this package is exactly the same as logr.Logger.
+type Logger = logr.Logger
+
+// LogSink in this package is exactly the same as logr.LogSink.
+type LogSink = logr.LogSink
+
+// RuntimeInfo in this package is exactly the same as logr.RuntimeInfo.
+type RuntimeInfo = logr.RuntimeInfo
+
+var (
+	// New is an alias for logr.New.
+	New = logr.New
+)
diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
new file mode 100644
index 0000000000..46de00fb06
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
@@ -0,0 +1,184 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package buffer provides a cache for bytes.Buffer instances that can be reused
+// to avoid frequent allocation and deallocation. It also has utility code
+// for log header formatting that uses these buffers.
+package buffer
+
+import (
+	"bytes"
+	"os"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2/internal/severity"
+)
+
+var (
+	// Pid is inserted into log headers. Can be overridden for tests.
+	Pid = os.Getpid()
+
+	// Time, if set, will be used instead of the actual current time.
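+	// For example (illustrative only), a test can pin header timestamps:
+	//
+	//	now := time.Date(2024, 1, 15, 12, 0, 0, 0, time.UTC)
+	//	buffer.Time = &now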
+	Time *time.Time
+)
+
+// Buffer holds a single bytes.Buffer for reuse. The zero value is ready for
+// use. It also provides some helper methods for output formatting.
+type Buffer struct {
+	bytes.Buffer
+	Tmp [64]byte // temporary byte array for creating headers.
+}
+
+var buffers = sync.Pool{
+	New: func() interface{} {
+		return new(Buffer)
+	},
+}
+
+// GetBuffer returns a new, ready-to-use buffer.
+func GetBuffer() *Buffer {
+	b := buffers.Get().(*Buffer)
+	b.Reset()
+	return b
+}
+
+// PutBuffer returns a buffer to the free list.
+func PutBuffer(b *Buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death, without relying on
+		// sync.Pool behavior. The documentation implies that items may
+		// get deallocated while stored there ("If the Pool holds the
+		// only reference when this [= be removed automatically]
+		// happens, the item might be deallocated."), but
+		// https://github.com/golang/go/issues/23199 leans more towards
+		// having such a size limit.
+		return
+	}
+
+	buffers.Put(b)
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i].
+func (buf *Buffer) twoDigits(i, d int) {
+	buf.Tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.Tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.Tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *Buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.Tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.Tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i].
+func (buf *Buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.Tmp)
+	for {
+		j--
+		buf.Tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.Tmp[i:], buf.Tmp[j:])
+}
+
+// FormatHeader formats a log header using the provided file name and line number
+// and writes it into the buffer.
+func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
+	if line < 0 {
+		line = 0 // not a real line number, but acceptable to someDigits
+	}
+	if s > severity.FatalLog {
+		s = severity.InfoLog // for safety.
+	}
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	if Time != nil {
+		now = *Time
+	}
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.Tmp[0] = severity.Char[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.Tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.Tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.Tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.Tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.Tmp[21] = ' '
+	buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID
+	buf.Tmp[29] = ' '
+	buf.Write(buf.Tmp[:30])
+	buf.WriteString(file)
+	buf.Tmp[0] = ':'
+	n := buf.someDigits(1, line)
+	buf.Tmp[n+1] = ']'
+	buf.Tmp[n+2] = ' '
+	buf.Write(buf.Tmp[:n+3])
+}
+
+// SprintHeader formats a log header and returns a string. This is a simpler
+// version of FormatHeader for use in ktesting.
+func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
+	if s > severity.FatalLog {
+		s = severity.InfoLog // for safety.
+	}
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	if Time != nil {
+		now = *Time
+	}
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.Tmp[0] = severity.Char[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.Tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.Tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.Tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.Tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.Tmp[21] = ']'
+	return string(buf.Tmp[:22])
+}
diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md
new file mode 100644
index 0000000000..03d692c8f8
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/README.md
@@ -0,0 +1,7 @@
+# Clock
+
+This package provides an interface for time-based operations. It allows
+mocking time for testing.
+
+This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular
+dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog).
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go
new file mode 100644
index 0000000000..cc11bb4802
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import "time"
+
+// PassiveClock allows for injecting fake or real clocks into code
+// that needs to read the current time but does not support scheduling
+// activity in the future.
+type PassiveClock interface {
+	Now() time.Time
+	Since(time.Time) time.Duration
+}
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+	PassiveClock
+	// After returns the channel of a new Timer.
+	// This method does not allow the backing timer to be freed/GC'ed before
+	// it fires. Use NewTimer instead.
+	After(d time.Duration) <-chan time.Time
+	// NewTimer returns a new Timer.
+	NewTimer(d time.Duration) Timer
+	// Sleep sleeps for the provided duration d.
+	// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+	Sleep(d time.Duration)
+	// NewTicker returns a new Ticker.
+	NewTicker(time.Duration) Ticker
+}
+
+// WithDelayedExecution allows for injecting fake or real clocks into
+// code that needs to make use of AfterFunc functionality.
+type WithDelayedExecution interface {
+	Clock
+	// AfterFunc executes f in its own goroutine after waiting
+	// for d duration and returns a Timer whose channel can be
+	// closed by calling Stop() on the Timer.
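+	// For example (illustrative only), c.AfterFunc(5*time.Second, cancel)
+	// schedules cancel to run after five seconds unless the returned Timer
+	// is stopped first.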
+	AfterFunc(d time.Duration, f func()) Timer
+}
+
+// WithTickerAndDelayedExecution allows for injecting fake or real clocks
+// into code that needs Ticker and AfterFunc functionality.
+type WithTickerAndDelayedExecution interface {
+	Clock
+	// AfterFunc executes f in its own goroutine after waiting
+	// for d duration and returns a Timer whose channel can be
+	// closed by calling Stop() on the Timer.
+	AfterFunc(d time.Duration, f func()) Timer
+}
+
+// Ticker defines the Ticker interface.
+type Ticker interface {
+	C() <-chan time.Time
+	Stop()
+}
+
+var _ Clock = RealClock{}
+
+// RealClock really calls time.Now().
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+	return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+	return time.Since(ts)
+}
+
+// After is the same as time.After(d).
+// This method does not allow the backing timer to be freed/GC'ed before
+// it fires. Use NewTimer instead.
+func (RealClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+// NewTimer is the same as time.NewTimer(d).
+func (RealClock) NewTimer(d time.Duration) Timer {
+	return &realTimer{
+		timer: time.NewTimer(d),
+	}
+}
+
+// AfterFunc is the same as time.AfterFunc(d, f).
+func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
+	return &realTimer{
+		timer: time.AfterFunc(d, f),
+	}
+}
+
+// NewTicker returns a new Ticker.
+func (RealClock) NewTicker(d time.Duration) Ticker {
+	return &realTicker{
+		ticker: time.NewTicker(d),
+	}
+}
+
+// Sleep is the same as time.Sleep(d).
+// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+func (RealClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+var _ = Timer(&realTimer{})
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+	timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+	return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+	return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+	return r.timer.Reset(d)
+}
+
+type realTicker struct {
+	ticker *time.Ticker
+}
+
+func (r *realTicker) C() <-chan time.Time {
+	return r.ticker.C
+}
+
+func (r *realTicker) Stop() {
+	r.ticker.Stop()
+}
diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
new file mode 100644
index 0000000000..f27bd14472
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
@@ -0,0 +1,42 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dbg provides some helper code for call traces.
+package dbg
+
+import (
+	"runtime"
+)
+
+// Stacks is a wrapper for runtime.Stack that attempts to recover the data for
+// all goroutines or the calling one.
+func Stacks(all bool) []byte {
+	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+	n := 10000
+	if all {
+		n = 100000
+	}
+	var trace []byte
+	for i := 0; i < 5; i++ {
+		trace = make([]byte, n)
+		nbytes := runtime.Stack(trace, all)
+		if nbytes < len(trace) {
+			return trace[:nbytes]
+		}
+		n *= 2
+	}
+	return trace
+}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
new file mode 100644
index 0000000000..d1a4751c94
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
@@ -0,0 +1,292 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-logr/logr"
+)
+
+type textWriter interface {
+	WriteText(*bytes.Buffer)
+}
+
+// WithValues implements LogSink.WithValues. The old key/value pairs are
+// assumed to be well-formed; the new ones are checked and padded if
+// necessary. It returns a new slice.
+func WithValues(oldKV, newKV []interface{}) []interface{} {
+	if len(newKV) == 0 {
+		return oldKV
+	}
+	newLen := len(oldKV) + len(newKV)
+	hasMissingValue := newLen%2 != 0
+	if hasMissingValue {
+		newLen++
+	}
+	// The new LogSink must have its own slice.
+	kv := make([]interface{}, 0, newLen)
+	kv = append(kv, oldKV...)
+	kv = append(kv, newKV...)
+	if hasMissingValue {
+		kv = append(kv, missingValue)
+	}
+	return kv
+}
+
+// MergeKVs deduplicates elements provided in two key/value slices.
+//
+// Keys in each slice are expected to be unique, so duplicates can only occur
+// when the first and second slice contain the same key. When that happens, the
+// key/value pair from the second slice is used. The first slice must be well-formed
+// (= even key/value pairs). The second one may have a missing value, in which
+// case the special "missing value" is added to the result.
+func MergeKVs(first, second []interface{}) []interface{} {
+	maxLength := len(first) + (len(second)+1)/2*2
+	if maxLength == 0 {
+		// Nothing to do at all.
+		return nil
+	}
+
+	if len(first) == 0 && len(second)%2 == 0 {
+		// Nothing to be overridden, second slice is well-formed
+		// and can be used directly.
+		return second
+	}
+
+	// Determine which keys are in the second slice so that we can skip
+	// them when iterating over the first one. The code intentionally
+	// favors performance over completeness: we assume that keys are string
+	// constants and thus compare equal when the string values are equal. A
+	// string constant being overridden by, for example, a fmt.Stringer is
+	// not handled.
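+	//
+	// For example (illustrative only): merging first=["ns", "default", "pod", "a"]
+	// with second=["pod", "b"] yields ["ns", "default", "pod", "b"].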
+	overrides := map[interface{}]bool{}
+	for i := 0; i < len(second); i += 2 {
+		overrides[second[i]] = true
+	}
+	merged := make([]interface{}, 0, maxLength)
+	for i := 0; i+1 < len(first); i += 2 {
+		key := first[i]
+		if overrides[key] {
+			continue
+		}
+		merged = append(merged, key, first[i+1])
+	}
+	merged = append(merged, second...)
+	if len(merged)%2 != 0 {
+		merged = append(merged, missingValue)
+	}
+	return merged
+}
+
+type Formatter struct {
+	AnyToStringHook AnyToStringFunc
+}
+
+type AnyToStringFunc func(v interface{}) string
+
+// MergeAndFormatKVs is a variant of MergeKVs which directly formats the key/value
+// pairs into a buffer.
+func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+	if len(first) == 0 && len(second) == 0 {
+		// Nothing to do at all.
+		return
+	}
+
+	if len(first) == 0 && len(second)%2 == 0 {
+		// Nothing to be overridden, second slice is well-formed
+		// and can be used directly.
+		for i := 0; i < len(second); i += 2 {
+			f.KVFormat(b, second[i], second[i+1])
+		}
+		return
+	}
+
+	// Determine which keys are in the second slice so that we can skip
+	// them when iterating over the first one. The code intentionally
+	// favors performance over completeness: we assume that keys are string
+	// constants and thus compare equal when the string values are equal. A
+	// string constant being overridden by, for example, a fmt.Stringer is
+	// not handled.
+	overrides := map[interface{}]bool{}
+	for i := 0; i < len(second); i += 2 {
+		overrides[second[i]] = true
+	}
+	for i := 0; i < len(first); i += 2 {
+		key := first[i]
+		if overrides[key] {
+			continue
+		}
+		f.KVFormat(b, key, first[i+1])
+	}
+	// Round down.
+	l := len(second)
+	l = l / 2 * 2
+	for i := 1; i < l; i += 2 {
+		f.KVFormat(b, second[i-1], second[i])
+	}
+	if len(second)%2 == 1 {
+		f.KVFormat(b, second[len(second)-1], missingValue)
+	}
+}
+
+func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+	Formatter{}.MergeAndFormatKVs(b, first, second)
+}
+
+const missingValue = "(MISSING)"
+
+// KVListFormat serializes all key/value pairs into the provided buffer.
+// A space gets inserted before the first pair and between each pair.
+func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+	for i := 0; i < len(keysAndValues); i += 2 {
+		var v interface{}
+		k := keysAndValues[i]
+		if i+1 < len(keysAndValues) {
+			v = keysAndValues[i+1]
+		} else {
+			v = missingValue
+		}
+		f.KVFormat(b, k, v)
+	}
+}
+
+func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+	Formatter{}.KVListFormat(b, keysAndValues...)
+}
+
+func KVFormat(b *bytes.Buffer, k, v interface{}) {
+	Formatter{}.KVFormat(b, k, v)
+}
+
+// formatAny is the fallback formatter for a value. It supports a hook (for
+// example, for YAML encoding) and itself uses JSON encoding.
+func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) {
+	b.WriteRune('=')
+	if f.AnyToStringHook != nil {
+		b.WriteString(f.AnyToStringHook(v))
+		return
+	}
+	formatAsJSON(b, v)
+}
+
+func formatAsJSON(b *bytes.Buffer, v interface{}) {
+	encoder := json.NewEncoder(b)
+	l := b.Len()
+	if err := encoder.Encode(v); err != nil {
+		// This shouldn't happen. We discard whatever the encoder
+		// wrote and instead dump an error string.
+		b.Truncate(l)
+		b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err))
+		return
+	}
+	// Remove trailing newline.
+	b.Truncate(b.Len() - 1)
+}
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
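+// For example (illustrative only), a String method that panics with "boom"
+// produces "<panic: boom>" instead of crashing the log call.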
+func StringerToString(s fmt.Stringer) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = s.String()
+	return
+}
+
+// MarshalerToValue invokes a marshaler and catches
+// panics.
+func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = m.MarshalLog()
+	return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur.
+func ErrorToString(err error) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = err.Error()
+	return
+}
+
+func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
+	b.WriteByte('=')
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintf(b, `"<panic: %v>"`, err)
+		}
+	}()
+	v.WriteText(b)
+}
+
+func writeStringValue(b *bytes.Buffer, v string) {
+	data := []byte(v)
+	index := bytes.IndexByte(data, '\n')
+	if index == -1 {
+		b.WriteByte('=')
+		// Simple string, quote quotation marks and non-printable characters.
+		b.WriteString(strconv.Quote(v))
+		return
+	}
+
+	// Complex multi-line string, show as-is with indentation like this:
+	//	I... "hello world" key=<
+	//		line 1
+	//		line 2
+	//	 >
+	//
+	// Tabs indent the lines of the value while the end of string delimiter
+	// is indented with a space. That has two purposes:
+	// - visual difference between the two for a human reader because the
+	//   indentation will be different
+	// - no ambiguity when some value line starts with the end delimiter
+	//
+	// One downside is that the output cannot distinguish between strings that
+	// end with a line break and those that don't because the end delimiter
+	// will always be on the next line.
+	b.WriteString("=<\n")
+	for index != -1 {
+		b.WriteByte('\t')
+		b.Write(data[0 : index+1])
+		data = data[index+1:]
+		index = bytes.IndexByte(data, '\n')
+	}
+	if len(data) == 0 {
+		// String ended with line break, don't add another.
+		b.WriteString(" >")
+	} else {
+		// No line break at end of last line, write rest of string and
+		// add one.
+		b.WriteByte('\t')
+		b.Write(data)
+		b.WriteString("\n >")
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go
new file mode 100644
index 0000000000..d9c7d15467
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go
@@ -0,0 +1,97 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/go-logr/logr"
+)
+
+// KVFormat serializes one key/value pair into the provided buffer.
+// A space gets inserted before the pair.
+func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
+	// This is the version without slog support. Must be kept in sync with
+	// the version in keyvalues_slog.go.
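+	//
+	// For example (illustrative only): KVFormat(b, "pod", "kube-system/coredns")
+	// appends ` pod="kube-system/coredns"` to the buffer.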
+
+	b.WriteByte(' ')
+	// Keys are assumed to be well-formed according to
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+	// for the sake of performance. Keys with spaces,
+	// special characters, etc. will break parsing.
+	if sK, ok := k.(string); ok {
+		// Avoid one allocation when the key is a string, which
+		// normally it should be.
+		b.WriteString(sK)
+	} else {
+		b.WriteString(fmt.Sprintf("%s", k))
+	}
+
+	// The type checks are sorted so that more frequently used ones
+	// come first because that is then faster in the common
+	// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+	// than plain strings
+	// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+	switch v := v.(type) {
+	case textWriter:
+		writeTextWriterValue(b, v)
+	case fmt.Stringer:
+		writeStringValue(b, StringerToString(v))
+	case string:
+		writeStringValue(b, v)
+	case error:
+		writeStringValue(b, ErrorToString(v))
+	case logr.Marshaler:
+		value := MarshalerToValue(v)
+		// A marshaler that returns a string is useful for
+		// delayed formatting of complex values. We treat this
+		// case like a normal string. This is useful for
+		// multi-line support.
+		//
+		// We could do this by recursively formatting a value,
+		// but that comes with the risk of infinite recursion
+		// if a marshaler returns itself. Instead we call it
+		// only once and rely on it returning the intended
+		// value directly.
+		switch value := value.(type) {
+		case string:
+			writeStringValue(b, value)
+		default:
+			f.formatAny(b, value)
+		}
+	case []byte:
+		// In https://github.com/kubernetes/klog/pull/237 it was decided
+		// to format byte slices with "%+q". The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		f.formatAny(b, v)
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go
new file mode 100644
index 0000000000..89acf97723
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go
@@ -0,0 +1,155 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+	"bytes"
+	"fmt"
+	"log/slog"
+	"strconv"
+
+	"github.com/go-logr/logr"
+)
+
+// KVFormat serializes one key/value pair into the provided buffer.
+// A space gets inserted before the pair.
+func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
+	// This is the version with slog support. Must be kept in sync with
+	// the version in keyvalues_no_slog.go.
+
+	b.WriteByte(' ')
+	// Keys are assumed to be well-formed according to
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+	// for the sake of performance. Keys with spaces,
+	// special characters, etc. will break parsing.
+	if sK, ok := k.(string); ok {
+		// Avoid one allocation when the key is a string, which
+		// normally it should be.
+		b.WriteString(sK)
+	} else {
+		b.WriteString(fmt.Sprintf("%s", k))
+	}
+
+	// The type checks are sorted so that more frequently used ones
+	// come first because that is then faster in the common
+	// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+	// than plain strings
+	// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+	//
+	// slog.LogValuer does not need to be handled here because the handler will
+	// already have resolved such special values to the final value for logging.
+	switch v := v.(type) {
+	case textWriter:
+		writeTextWriterValue(b, v)
+	case slog.Value:
+		// This must come before fmt.Stringer because slog.Value implements
+		// fmt.Stringer, but does not produce the output that we want.
+		b.WriteByte('=')
+		generateJSON(b, v)
+	case fmt.Stringer:
+		writeStringValue(b, StringerToString(v))
+	case string:
+		writeStringValue(b, v)
+	case error:
+		writeStringValue(b, ErrorToString(v))
+	case logr.Marshaler:
+		value := MarshalerToValue(v)
+		// A marshaler that returns a string is useful for
+		// delayed formatting of complex values. We treat this
+		// case like a normal string. This is useful for
+		// multi-line support.
+		//
+		// We could do this by recursively formatting a value,
+		// but that comes with the risk of infinite recursion
+		// if a marshaler returns itself. Instead we call it
+		// only once and rely on it returning the intended
+		// value directly.
+		switch value := value.(type) {
+		case string:
+			writeStringValue(b, value)
+		default:
+			f.formatAny(b, value)
+		}
+	case slog.LogValuer:
+		value := slog.AnyValue(v).Resolve()
+		if value.Kind() == slog.KindString {
+			writeStringValue(b, value.String())
+		} else {
+			b.WriteByte('=')
+			generateJSON(b, value)
+		}
+	case []byte:
+		// In https://github.com/kubernetes/klog/pull/237 it was decided
+		// to format byte slices with "%+q". The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		f.formatAny(b, v)
+	}
+}
+
+// generateJSON has the same preference for plain strings as KVFormat.
+// In contrast to KVFormat it always produces valid JSON with no line breaks.
+func generateJSON(b *bytes.Buffer, v interface{}) {
+	switch v := v.(type) {
+	case slog.Value:
+		switch v.Kind() {
+		case slog.KindGroup:
+			// Format as a JSON group. We must not involve f.AnyToStringHook (if there is any),
+			// because there is no guarantee that it produces valid JSON.
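+			//
+			// For example (illustrative only), slog.GroupValue(slog.String("name", "coredns"))
+			// is rendered as {"name":"coredns"}.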
+			b.WriteByte('{')
+			for i, attr := range v.Group() {
+				if i > 0 {
+					b.WriteByte(',')
+				}
+				b.WriteString(strconv.Quote(attr.Key))
+				b.WriteByte(':')
+				generateJSON(b, attr.Value)
+			}
+			b.WriteByte('}')
+		case slog.KindLogValuer:
+			generateJSON(b, v.Resolve())
+		default:
+			// Peel off the slog.Value wrapper and format the actual value.
+			generateJSON(b, v.Any())
+		}
+	case fmt.Stringer:
+		b.WriteString(strconv.Quote(StringerToString(v)))
+	case logr.Marshaler:
+		generateJSON(b, MarshalerToValue(v))
+	case slog.LogValuer:
+		generateJSON(b, slog.AnyValue(v).Resolve().Any())
+	case string:
+		b.WriteString(strconv.Quote(v))
+	case error:
+		b.WriteString(strconv.Quote(v.Error()))
+	default:
+		formatAsJSON(b, v)
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
new file mode 100644
index 0000000000..30fa1834f0
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go
@@ -0,0 +1,58 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package severity provides definitions for klog severity (info, warning, ...)
+package severity
+
+import (
+	"strings"
+)
+
+// Severity identifies the sort of log: info, warning, etc. The binding to
+// flag.Value is handled in klog.go.
+type Severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	InfoLog Severity = iota
+	WarningLog
+	ErrorLog
+	FatalLog
+	NumSeverity = 4
+)
+
+// Char contains one shortcut letter per severity level.
+const Char = "IWEF"
+
+// Name contains one name per severity level.
+var Name = []string{
+	InfoLog:    "INFO",
+	WarningLog: "WARNING",
+	ErrorLog:   "ERROR",
+	FatalLog:   "FATAL",
+}
+
+// ByName looks up a severity level by name.
+func ByName(s string) (Severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range Name {
+		if name == s {
+			return Severity(i), true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go
new file mode 100644
index 0000000000..21f1697d09
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go
@@ -0,0 +1,96 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sloghandler
+
+import (
+	"context"
+	"log/slog"
+	"runtime"
+	"strings"
+	"time"
+
+	"k8s.io/klog/v2/internal/severity"
+)
+
+func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error {
+	now := record.Time
+	if now.IsZero() {
+		// This format doesn't support printing entries without a time.
+		now = time.Now()
+	}
+
+	// slog has numeric severity levels, with 0 as the default "info", negative
+	// levels for debugging, and positive levels with some pre-defined values for
+	// more important messages. Those ranges get mapped to the corresponding klog
+	// levels where possible, with "info" the default that is used also for
+	// negative debug levels.
+	level := record.Level
+	s := severity.InfoLog
+	switch {
+	case level >= slog.LevelError:
+		s = severity.ErrorLog
+	case level >= slog.LevelWarn:
+		s = severity.WarningLog
+	}
+
+	var file string
+	var line int
+	if record.PC != 0 {
+		// Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70
+		fs := runtime.CallersFrames([]uintptr{record.PC})
+		f, _ := fs.Next()
+		if f.File != "" {
+			file = f.File
+			if slash := strings.LastIndex(file, "/"); slash >= 0 {
+				file = file[slash+1:]
+			}
+			line = f.Line
+		}
+	} else {
+		file = "???"
+		line = 1
+	}
+
+	kvList := make([]interface{}, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = appendAttr(groups, kvList, attr)
+		return true
+	})
+
+	printWithInfos(file, line, now, nil, s, record.Message, kvList)
+	return nil
+}
+
+func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} {
+	kvList := make([]interface{}, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = appendAttr(groups, kvList, attr)
+	}
+	return kvList
+}
+
+func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} {
+	var key string
+	if groups != "" {
+		key = groups + "." + attr.Key
+	} else {
+		key = attr.Key
+	}
+	return append(kvList, key, attr.Value)
+}
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
new file mode 100644
index 0000000000..786af74bfd
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/k8s_references.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/go-logr/logr"
+)
+
+// ObjectRef references a kubernetes object.
+type ObjectRef struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace,omitempty"`
+}
+
+func (ref ObjectRef) String() string {
+	if ref.Namespace != "" {
+		var builder strings.Builder
+		builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
+		builder.WriteString(ref.Namespace)
+		builder.WriteRune('/')
+		builder.WriteString(ref.Name)
+		return builder.String()
+	}
+	return ref.Name
+}
+
+func (ref ObjectRef) WriteText(out *bytes.Buffer) {
+	out.WriteRune('"')
+	ref.writeUnquoted(out)
+	out.WriteRune('"')
+}
+
+func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
+	if ref.Namespace != "" {
+		out.WriteString(ref.Namespace)
+		out.WriteRune('/')
+	}
+	out.WriteString(ref.Name)
+}
+
+// MarshalLog ensures that loggers with support for structured output will log
+// as a struct by removing the String method via a custom type.
+func (ref ObjectRef) MarshalLog() interface{} {
+	type or ObjectRef
+	return or(ref)
+}
+
+var _ logr.Marshaler = ObjectRef{}
+
+// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface.
+// This interface may expand in the future, but will always be a subset of the
+// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface.
+type KMetadata interface {
+	GetName() string
+	GetNamespace() string
+}
+
+// KObj returns ObjectRef from ObjectMeta.
+func KObj(obj KMetadata) ObjectRef {
+	if obj == nil {
+		return ObjectRef{}
+	}
+	if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() {
+		return ObjectRef{}
+	}
+
+	return ObjectRef{
+		Name:      obj.GetName(),
+		Namespace: obj.GetNamespace(),
+	}
+}
+
+// KRef returns ObjectRef from name and namespace.
+func KRef(namespace, name string) ObjectRef {
+	return ObjectRef{
+		Name:      name,
+		Namespace: namespace,
+	}
+}
+
+// KObjs returns a slice of ObjectRef from a slice of ObjectMeta.
+//
+// DEPRECATED: Use KObjSlice instead, it has better performance.
+func KObjs(arg interface{}) []ObjectRef {
+	s := reflect.ValueOf(arg)
+	if s.Kind() != reflect.Slice {
+		return nil
+	}
+	objectRefs := make([]ObjectRef, 0, s.Len())
+	for i := 0; i < s.Len(); i++ {
+		if v, ok := s.Index(i).Interface().(KMetadata); ok {
+			objectRefs = append(objectRefs, KObj(v))
+		} else {
+			return nil
+		}
+	}
+	return objectRefs
+}
+
+// KObjSlice takes a slice of objects that implement the KMetadata interface
+// and returns an object that gets logged as a slice of ObjectRef values or a
+// string containing those values, depending on whether the logger prefers text
+// output or structured output.
+//
+// An error string is logged when KObjSlice is not passed a suitable slice.
+//
+// Processing of the argument is delayed until the value actually gets logged,
+// in contrast to KObjs where that overhead is incurred regardless of whether
+// the result is needed.
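A minimal usage sketch (an editorial illustration, not part of the vendored file; the pod type and the -v level are hypothetical stand-ins for any slice whose element type implements KMetadata) of the lazy/eager distinction described above:

    package main

    import "k8s.io/klog/v2"

    type pod struct{ name, ns string }

    func (p pod) GetName() string      { return p.name }
    func (p pod) GetNamespace() string { return p.ns }

    func main() {
        pods := []pod{{"kubedns", "kube-system"}, {"coredns", "kube-system"}}
        // Lazy: the reflection walk over pods only happens if -v=5 is enabled.
        klog.V(5).InfoS("syncing", "pods", klog.KObjSlice(pods))
        // Eager and deprecated: the []ObjectRef is built even when the line is discarded.
        klog.V(5).InfoS("syncing", "pods", klog.KObjs(pods))
        klog.Flush()
    }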
+func KObjSlice(arg interface{}) interface{} {
+	return kobjSlice{arg: arg}
+}
+
+type kobjSlice struct {
+	arg interface{}
+}
+
+var _ fmt.Stringer = kobjSlice{}
+var _ logr.Marshaler = kobjSlice{}
+
+func (ks kobjSlice) String() string {
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
+	}
+	return fmt.Sprintf("%v", objectRefs)
+}
+
+func (ks kobjSlice) MarshalLog() interface{} {
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
+	}
+	return objectRefs
+}
+
+func (ks kobjSlice) process() (objs []interface{}, err string) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as nil.
+		return nil, ""
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
+	}
+	objectRefs := make([]interface{}, 0, s.Len())
+	for i := 0; i < s.Len(); i++ {
+		item := s.Index(i).Interface()
+		if item == nil {
+			objectRefs = append(objectRefs, nil)
+		} else if v, ok := item.(KMetadata); ok {
+			objectRefs = append(objectRefs, KObj(v))
+		} else {
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of KMetadata, got type %T>", item)
+		}
+	}
+	return objectRefs, ""
+}
+
+var nilToken = []byte("null")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as null.
+		out.Write(nilToken)
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{','})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).WriteText(out)
+		} else {
+			fmt.Fprintf(out, `"<KObjSlice needs a slice of KMetadata, got type %T>"`, item)
+			return
+		}
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go
new file mode 100644
index 0000000000..5522c84c77
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go
@@ -0,0 +1,39 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+	"log/slog"
+)
+
+func (ref ObjectRef) LogValue() slog.Value {
+	if ref.Namespace != "" {
+		return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace))
+	}
+	return slog.GroupValue(slog.String("name", ref.Name))
+}
+
+var _ slog.LogValuer = ObjectRef{}
+
+func (ks kobjSlice) LogValue() slog.Value {
+	return slog.AnyValue(ks.MarshalLog())
+}
+
+var _ slog.LogValuer = kobjSlice{}
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
new file mode 100644
index 0000000000..72502db3ae
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -0,0 +1,1729 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// klog.Info("Prepare to repel boarders") +// +// klog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if klog.V(2) { +// klog.Info("Starting transaction...") +// } +// +// klog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to standard error. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=true +// Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +package klog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "math" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" +) + +// severityValue identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. 
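As an editorial illustration (not part of the patch): once klog's flags are registered, -stderrthreshold accepts either a severity name or its numeric value, both routed through this flag.Value implementation. The flag set name below is arbitrary:

    package main

    import (
        "flag"

        "k8s.io/klog/v2"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        klog.InitFlags(fs)
        _ = fs.Set("stderrthreshold", "warning") // by name, case-insensitive
        _ = fs.Set("stderrthreshold", "2")       // or numeric: 2 == ERROR
        // Note: the threshold only matters when logging to files
        // (-logtostderr defaults to true, which bypasses it).
        klog.Flush()
    }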
+type severityValue struct { + severity.Severity +} + +// get returns the value of the severity. +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) +} + +// set sets the value of the severity. +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) +} + +// Get is part of the flag.Getter interface. +func (s *severityValue) Get() interface{} { + return s.Severity +} + +// Set is part of the flag.Value interface. +func (s *severityValue) Set(value string) error { + var threshold severity.Severity + // Is it a known name? + if v, ok := severity.ByName(value); ok { + threshold = v + } else { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + threshold = severity.Severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. 
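A short editorial sketch (not part of the vendored file) of the -vmodule behavior these types implement: literal patterns are compared directly against the file's basename with the ".go" suffix stripped, while patterns containing metacharacters go through filepath.Match:

    package main

    import (
        "flag"

        "k8s.io/klog/v2"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        klog.InitFlags(fs)
        // "recordio" is a literal pattern; "gopher*" requires filepath.Match.
        _ = fs.Set("vmodule", "recordio=2,gopher*=3")
        // Emitted only from files whose basename matches a pattern at level >= 3.
        klog.V(3).Info("verbose detail")
        klog.Flush()
    }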
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return m.serialize()
+}
+
+func (m *moduleSpec) serialize() string {
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Set sets the module value.
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	filter, err := parseModuleSpec(value)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+func parseModuleSpec(value string) ([]modulePat, error) {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return nil, errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.ParseInt(patLev[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("syntax error: expect comma-separated list of filename=N")
+		}
+		if v < 0 {
+			return nil, errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	return filter, nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface.
+// It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Set sets the backtrace value.
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		t.line = 0
+		t.file = ""
+		return nil
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+var logging loggingT
+var commandLine flag.FlagSet
+
+// init sets up the defaults and creates command line flags.
+func init() {
+	commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
+	commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)")
+	commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
+		"Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
+			"If the value is 0, the maximum file size is unlimited.")
+	commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
+	commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)")
+	logging.setVState(0, nil, false)
+	commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity")
+	commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages")
+	commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
+	commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
+	commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
+	logging.stderrThreshold = severityValue{
+		Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
+	}
+	commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)")
+	commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+	logging.settings.contextualLoggingEnabled = true
+	logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil)
+}
+
+// InitFlags is for explicitly initializing the flags.
+// It may get called repeatedly for different flagsets, but not
+// twice for the same one. May get called concurrently
+// with other goroutines using klog. However, only some flags
+// may get set concurrently (see implementation).
+func InitFlags(flagset *flag.FlagSet) {
+	if flagset == nil {
+		flagset = flag.CommandLine
+	}
+
+	commandLine.VisitAll(func(f *flag.Flag) {
+		flagset.Var(f.Value, f.Name, f.Usage)
+	})
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// settings collects global settings.
+type settings struct {
+	// contextualLoggingEnabled controls whether contextual logging is
+	// active. Disabling it may have some small performance benefit.
+	contextualLoggingEnabled bool
+
+	// logger is the global Logger chosen by users of klog, nil if
+	// none is available.
+	logger *logWriter
+
+	// loggerOptions contains the options that were supplied for
+	// globalLogger.
+	loggerOptions loggerOptions
+
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severityValue // The -stderrthreshold flag.
+
+	// Access to all of the following fields must be protected via a mutex.
+
+	// file holds the writer for each of the log types.
+	file [severity.NumSeverity]flushSyncWriter
+	// flushInterval is the interval for periodic flushing. If zero,
+	// the global default will be used.
+	flushInterval time.Duration
+
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+
+	// If non-empty, overrides the choice of directory in which to write logs.
+	// See createLogDirs for the full list of possible destinations.
+	logDir string
+
+	// If non-empty, specifies the path of the file to write logs. Mutually exclusive
+	// with the log_dir option.
+	logFile string
+
+	// When logFile is specified, this limiter makes sure the logFile won't exceed a certain size.
+	// When it does, the logFile will be cleaned up. If this value is 0, no size limitation will be
+	// applied to logFile.
+	logFileMaxSizeMB uint64
+
+	// If true, do not add the prefix headers, useful when used with SetOutput
+	skipHeaders bool
+
+	// If true, do not add the headers to log files
+	skipLogHeaders bool
+
+	// If true, add the file directory to the header
+	addDirHeader bool
+
+	// If true, messages will not be propagated to lower severity log levels
+	oneOutput bool
+
+	// If set, all output will be filtered through the filter.
+	filter LogFilter
+}
+
+// deepCopy creates a copy that doesn't share anything with the original
+// instance.
+func (s settings) deepCopy() settings {
+	// vmodule is a slice and would be shared, so we have to copy it.
+	filter := make([]modulePat, len(s.vmodule.filter))
+	copy(filter, s.vmodule.filter)
+	s.vmodule.filter = filter
+
+	if s.logger != nil {
+		logger := *s.logger
+		s.logger = &logger
+	}
+
+	return s
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	settings
+
+	// flushD holds a flushDaemon that frequently flushes log file buffers.
+	// Uses its own mutex.
+	flushD *flushDaemon
+
+	// mu protects the remaining elements of this structure and the fields
+	// in settings which need a mutex lock.
+	mu sync.Mutex
+
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+}
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	l.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&l.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		l.vmodule.filter = filter
+		l.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&l.filterLength, int32(len(filter)))
+	l.verbosity.set(verbosity)
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+// CaptureState gathers information about all current klog settings.
+// The result can be used to restore those settings.
+func CaptureState() State {
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return &state{
+		settings:      logging.settings.deepCopy(),
+		flushDRunning: logging.flushD.isRunning(),
+		maxSize:       MaxSize,
+	}
+}
+
+// State stores a snapshot of klog settings. It gets created with CaptureState
+// and can be used to restore the entire state. Modifying individual settings
+// is supported via the command line flags.
+type State interface {
+	// Restore restores the entire state. It may get called more than once.
+	Restore()
+}
+
+type state struct {
+	settings
+
+	flushDRunning bool
+	maxSize       uint64
+}
+
+func (s *state) Restore() {
+	// This needs to be done before mutex locking.
+	if s.flushDRunning && !logging.flushD.isRunning() {
+		// This is not quite accurate: StartFlushDaemon might
+		// have been called with some different interval.
+		interval := s.flushInterval
+		if interval == 0 {
+			interval = flushInterval
+		}
+		logging.flushD.run(interval)
+	} else if !s.flushDRunning && logging.flushD.isRunning() {
+		logging.flushD.stop()
+	}
+
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+
+	logging.settings = s.settings
+	logging.setVState(s.verbosity, s.vmodule.filter, true)
+	MaxSize = s.maxSize
+}
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames up the source line to be identified
+in the log message lives.
+
+Log lines have this form:
+
+	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+
+where the fields are defined as follows:
+
+	L                A single character, representing the log level (eg 'I' for INFO)
+	mm               The month (zero padded; ie May is '05')
+	dd               The day (zero padded)
+	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
+	threadid         The space-padded thread ID as returned by GetTID()
+	file             The file name
+	line             The line number
+	msg              The user-supplied message
+*/
+func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) {
+	_, file, line, ok := runtime.Caller(3 + depth)
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		if slash := strings.LastIndex(file, "/"); slash >= 0 {
+			path := file
+			file = path[slash+1:]
+			if l.addDirHeader {
+				if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
+					file = path[dirsep+1:]
+				}
+			}
+		}
+	}
+	return l.formatHeader(s, file, line, timeNow()), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer {
+	buf := buffer.GetBuffer()
+	if l.skipHeaders {
+		return buf
+	}
+	buf.FormatHeader(s, file, line, now)
+	return buf
+}
+
+func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
+	l.printlnDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+	if false {
+		_ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println
+	}
+
+	buf, file, line := l.header(s, depth)
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
+	}
+	if filter != nil {
+		args = filter.Filter(args)
+	}
+	fmt.Fprintln(buf, args...)
+	l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
+	l.printDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+	if false {
+		_ = fmt.Sprint(args...) // cause vet to treat this function like fmt.Print
+	}
+
+	buf, file, line := l.header(s, depth)
+	l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...)
+}
+
+func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
+	}
+	if filter != nil {
+		args = filter.Filter(args)
+	}
+	fmt.Fprint(buf, args...)
+	if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) {
+	l.printfDepth(s, logger, filter, 1, format, args...)
+} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + + buf, file, line := l.header(s, depth) + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() + } + if filter != nil { + format, args = filter.FilterF(format, args) + } + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, logger, buf, depth, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line, timeNow()) + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() + } + if filter != nil { + args = filter.Filter(args) + } + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) +} + +// if logger is specified, will call logger.Error, otherwise output with logging module. +func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } + if logger != nil { + logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) + return + } + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) +} + +// if logger is specified, will call logger.Info, otherwise output with logging module. +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } + if logger != nil { + logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) + return + } + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) +} + +// printS is called from infoS and errorS if logger is not specified. +// set log severity by s +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) + // Make the buffer available for reuse. 
+ buffer.PutBuffer(b) +} + +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + for s := severity.FatalLog; s >= severity.InfoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + +// LogToStderr sets whether to log exclusively to stderr, bypassing outputs +func LogToStderr(stderr bool) { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.toStderr = stderr +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { + var isLocked = true + l.mu.Lock() + defer func() { + if isLocked { + // Unlock before returning in case that it wasn't done already. + l.mu.Unlock() + } + }() + + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(dbg.Stacks(false)) + } + } + data := buf.Bytes() + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) + } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } + } + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + _, _ = l.file[severity.InfoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + if l.oneOutput { + _, _ = l.file[s].Write(data) + } else { + switch s { + case severity.FatalLog: + _, _ = l.file[severity.FatalLog].Write(data) + fallthrough + case severity.ErrorLog: + _, _ = l.file[severity.ErrorLog].Write(data) + fallthrough + case severity.WarningLog: + _, _ = l.file[severity.WarningLog].Write(data) + fallthrough + case severity.InfoLog: + _, _ = l.file[severity.InfoLog].Write(data) + } + } + } + } + if s == severity.FatalLog { + // If we got here via Exit rather than Fatal, print no stacks. 
+ if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) + } + + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := severity.FatalLog; log >= severity.InfoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + _, _ = f.Write(trace) + } + } + l.mu.Unlock() + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + buffer.PutBuffer(buf) + + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + OsExit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity.Severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. +func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +// The startup argument indicates whether this is the initial startup of klog. 
+// If startup is true, existing files are opened for appending instead of truncated. +func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severity.Name[sb.sev], now, startup) + if err != nil { + return err + } + if startup { + fileInfo, err := sb.file.Stat() + if err != nil { + return fmt.Errorf("file stat could not get fileinfo: %v", err) + } + // init file size + sb.nbytes = uint64(fileInfo.Size()) + } else { + sb.nbytes = 0 + } + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + if sb.logger.skipLogHeaders { + return nil + } + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), + } + if err := sb.rotateFile(now, true); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 5 * time.Second + +// flushDaemon periodically flushes the log file buffers. +type flushDaemon struct { + mu sync.Mutex + clock clock.Clock + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} + } + return &flushDaemon{ + flush: flush, + clock: tickClock, + } +} + +// run starts a goroutine that periodically calls the daemons flush function. +// Calling run on an already running daemon will have no effect. +func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return + } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect. 
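For callers, the exported wrappers further below (StartFlushDaemon, StopFlushDaemon) are the intended way to drive this daemon; a minimal shutdown sketch (editorial, not part of the vendored file):

    package main

    import (
        "time"

        "k8s.io/klog/v2"
    )

    func main() {
        // Flush buffered file output every second instead of the 5s default.
        klog.StartFlushDaemon(time.Second)
        // Stop the daemon on shutdown; it flushes once more and leaks no goroutine.
        defer klog.StopFlushDaemon()

        klog.Info("working")
    }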
+func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running, and flushes once. +// This prevents klog from leaking goroutines on shutdown. After stopping +// the daemon, you can still manually flush buffers again by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := severity.FatalLog; s >= severity.InfoLog; s-- { + file := l.file[s] + if file != nil { + _ = file.Flush() // ignore error + _ = file.Sync() // ignore error + } + } + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity.Severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". 
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + file = strings.TrimSuffix(file, ".go") + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose struct { + enabled bool + logger *logWriter +} + +func newVerbose(level Level, b bool) Verbose { + if logging.logger == nil { + return Verbose{b, nil} + } + v := logging.logger.V(int(level)) + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} +} + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a struct of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// +// if klog.V(2).Enabled() { klog.Info("log this") } +// +// or +// +// klog.V(2).Info("log this") +// +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and -vmodule flags; both are off by default. The V call will log if its level +// is less than or equal to the value of the -v flag, or alternatively if its level is +// less than or equal to the value of the -vmodule pattern matching the source file +// containing the call. +func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). +func VDepth(depth int, level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return newVerbose(level, true) + } + + // It's off globally but vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. 
This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { + return newVerbose(level, false) + } + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := logging.pcs[0] - 1 + v, ok := logging.vmap[pc] + if !ok { + v = logging.setV(pc) + } + return newVerbose(level, v >= level) + } + return newVerbose(level, false) +} + +// Enabled will return true if this log level is enabled, guarded by the value +// of v. +// See the documentation of V for usage. +func (v Verbose) Enabled() bool { + return v.enabled +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v.enabled { + logging.print(severity.InfoLog, v.logger, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v.enabled { + logging.println(severity.InfoLog, v.logger, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v.enabled { + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) + } +} + +// InfoS is equivalent to the global InfoS function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) + } +} + +// InfoSDepth acts as InfoS but uses depth to determine which call frame to log. +// InfoSDepth(0, "msg") is the same as InfoS("msg"). +func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) +} + +// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) + } +} + +// Deprecated: Use ErrorS instead. 
+func (v Verbose) Error(err error, msg string, args ...interface{}) {
+	if v.enabled {
+		logging.errorS(err, v.logger, logging.filter, 0, msg, args...)
+	}
+}
+
+// ErrorS is equivalent to the global Error function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
+	if v.enabled {
+		logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Infoln(args ...interface{}) {
+	logging.println(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfolnDepth acts as Infoln but uses depth to determine which call frame to log.
+// InfolnDepth(0, "msg") is the same as Infoln("msg").
+func InfolnDepth(depth int, args ...interface{}) {
+	logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...)
+}
+
+// InfofDepth acts as Infof but uses depth to determine which call frame to log.
+// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...).
+func InfofDepth(depth int, format string, args ...interface{}) {
+	logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// InfoS writes a structured log entry to the INFO log.
+// The msg argument adds a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
+// output:
+// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
+func InfoS(msg string, keysAndValues ...interface{}) {
+	logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Warningln(args ...interface{}) {
+	logging.println(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log.
+// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) +} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Errorln(args ...interface{}) { + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) +} + +// ErrorS structured logs to the ERROR, WARNING, and INFO logs. +// the err argument used as "err" field of log line. +// The msg argument used to add constant description to the log line. +// The key/value pairs would be join by "=" ; a newline is always appended. +// +// Basic examples: +// >> klog.ErrorS(err, "Failed to update pod status") +// output: +// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" +func ErrorS(err error, msg string, keysAndValues ...interface{}) { + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) +} + +// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. +// ErrorSDepth(0, "msg") is the same as ErrorS("msg"). +func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) 
+} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. +// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls OsExit(255). +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Fatalln(args ...interface{}) { + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls OsExit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). +func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) 
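The Fatal and Exit families above differ in one way worth spelling out: per the doc comments, Fatal dumps goroutine stack traces and calls OsExit(255), while Exit sets fatalNoStacks and calls OsExit(1) without the stack dump. A small hedged sketch (the run function and its error are hypothetical):

package main

import (
	"errors"

	klog "k8s.io/klog/v2"
)

func run() error { return errors.New("config not found") }

func main() {
	if err := run(); err != nil {
		// Exit logs at FATAL severity but, unlike Fatal, skips the
		// goroutine stack dump and terminates with exit code 1.
		klog.Exit("startup failed: ", err)
	}
}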
+} + +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) +} + +// LogFilter is a collection of functions that can filter all logging calls, +// e.g. for sanitization of arguments and prevent accidental leaking of secrets. +type LogFilter interface { + Filter(args []interface{}) []interface{} + FilterF(format string, args []interface{}) (string, []interface{}) + FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) +} + +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogFilter(filter LogFilter) { + logging.filter = filter +} diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go new file mode 100644 index 0000000000..8bee16204d --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -0,0 +1,130 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package klog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +func createLogDirs() { + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" + userNameOnce sync.Once +) + +func init() { + if h, err := os.Hostname(); err == nil { + host = shortHostname(h) + } +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". 
+func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + getUserName(), + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := openOrCreate(logging.logFile, startup) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := openOrCreate(fname, startup) + if err == nil { + symlink := filepath.Join(dir, link) + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go new file mode 100644 index 0000000000..aa46726851 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_others.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package klog + +import ( + "os/user" +) + +func getUserName() string { + userNameOnce.Do(func() { + current, err := user.Current() + if err == nil { + userName = current.Username + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go new file mode 100644 index 0000000000..2517f9c538 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package klog + +import ( + "os" + "strings" +) + +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. + // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. 
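To make the logName format above concrete: for a program named cortex running as root on host ingester-0 with pid 1234, an INFO log file created at 2024-01-15 16:33:51 would be named cortex.ingester-0.root.log.INFO.20240115-163351.1234, with a cortex.INFO symlink pointing at it (all of these values are illustrative). A standalone reproduction of the same format string:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, time.January, 15, 16, 33, 51, 0, time.UTC)
	// Same verbs as logName above, with the inputs hard-coded.
	name := fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		"cortex", "ingester-0", "root", "INFO",
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), 1234)
	fmt.Println(name) // cortex.ingester-0.root.log.INFO.20240115-163351.1234
}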
+ u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 0000000000..efec96fd45 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,105 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). +type klogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l *klogger) Info(level int, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + // Skip this function. + VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) +} + +func (l *klogger) Enabled(level int) bool { + return VDepth(l.callDepth+1, Level(level)).Enabled() +} + +func (l *klogger) Error(err error, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + ErrorSDepth(l.callDepth+1, err, msg, merged...) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '.' characters to separate name elements. Callers should not pass '.' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) 
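The WithName bookkeeping in klogger keeps the logger name in the first key/value pair under the "logger" key, which makes dotted names cheap to extend. A minimal usage sketch against the vendored API; the names, the key/value pair, and the output line shown as a comment are illustrative approximations.

package main

import klog "k8s.io/klog/v2"

func main() {
	logger := klog.NewKlogr().WithName("compactor").WithName("cleaner")
	// The two names are joined with '.' and logged under the "logger" key,
	// roughly: I0115 ... "starting" logger="compactor.cleaner" tenant="user-1"
	logger.Info("starting", "tenant", "user-1")
}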
+ l.values = v + l.hasPrefix = true + } + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 0000000000..f7bf740306 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr/slogr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. + buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) slogr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." 
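Given the slogr plumbing above, klog can serve as a backend for the standard library's log/slog on go1.21+. A sketch, assuming the vendored go-logr/logr/slogr package added earlier in this patch; the message and key/value pairs are illustrative.

package main

import (
	"log/slog"

	"github.com/go-logr/logr/slogr"
	klog "k8s.io/klog/v2"
)

func main() {
	// Wrap klog's logr.Logger in a slog.Handler; because klogger
	// implements slogr.SlogSink, records flow through the
	// Handle/slogOutput path shown above.
	logger := slog.New(slogr.NewSlogHandler(klog.NewKlogr()))
	logger.Info("bucket index updated", "tenant", "user-1", "blocks", 2)
}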
+ name + } else { + clone.groups = name + } + return &clone +} + +var _ slogr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/utils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/utils/clock/README.md b/vendor/k8s.io/utils/clock/README.md new file mode 100644 index 0000000000..ad2a8868aa --- /dev/null +++ b/vendor/k8s.io/utils/clock/README.md @@ -0,0 +1,4 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go new file mode 100644 index 0000000000..b8b6af5c81 --- /dev/null +++ b/vendor/k8s.io/utils/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. 
+ AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. 
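The point of the interfaces above is that time becomes injectable: production code accepts a clock.Clock and tests can pass a fake implementation. A small sketch against the vendored API; waitOrTimeout is a hypothetical helper, not part of the package.

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/utils/clock"
)

// waitOrTimeout blocks until done is closed or the injected clock's
// timer fires, whichever comes first.
func waitOrTimeout(c clock.Clock, done <-chan struct{}, d time.Duration) error {
	t := c.NewTimer(d)
	defer t.Stop()
	select {
	case <-done:
		return nil
	case <-t.C():
		return errors.New("timed out")
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	fmt.Println(waitOrTimeout(clock.RealClock{}, done, time.Second)) // <nil>
}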
+func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7df038cce2..ac9fe0d6eb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,7 +20,7 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud @@ -29,6 +29,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared @@ -41,7 +42,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing # github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -100,7 +101,7 @@ github.com/VictoriaMetrics/fastcache ## explicit github.com/alecthomas/template github.com/alecthomas/template/parse -# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 +# github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 ## explicit; go 1.15 github.com/alecthomas/units # github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a @@ -120,8 +121,8 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.45.25 -## explicit; go 1.11 +# github.com/aws/aws-sdk-go v1.48.14 +## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer github.com/aws/aws-sdk-go/aws/awserr @@ -250,6 +251,9 @@ github.com/aws/smithy-go/rand github.com/aws/smithy-go/time github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io +# github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 +## explicit; go 1.20 +github.com/bboreham/go-loser # github.com/benbjohnson/clock v1.3.5 ## explicit; go 1.15 github.com/benbjohnson/clock @@ -278,8 +282,8 @@ github.com/coreos/go-semver/semver ## explicit; go 1.12 github.com/coreos/go-systemd/v22/activation github.com/coreos/go-systemd/v22/journal -# github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52 -## explicit; go 1.19 +# github.com/cortexproject/promqlsmith v0.0.0-20240115062247-9ed0dfba956d +## explicit; go 1.20 github.com/cortexproject/promqlsmith # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit @@ -319,11 +323,11 @@ github.com/fatih/color # github.com/felixge/fgprof v0.9.3 ## explicit; go 1.14 github.com/felixge/fgprof 
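For readers less familiar with Go vendoring: these vendor/modules.txt changes are generated, not hand-written. After bumping versions in go.mod, running go mod vendor rewrites the vendor/ tree and this manifest; each "## explicit" marker records that the main module requires that dependency directly, and the "go 1.NN" suffix is the language version the dependency's own go.mod declares.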
-# github.com/felixge/httpsnoop v1.0.3 +# github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.6.0 -## explicit; go 1.16 +# github.com/fsnotify/fsnotify v1.7.0 +## explicit; go 1.17 github.com/fsnotify/fsnotify # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 @@ -332,10 +336,11 @@ github.com/go-kit/log/level # github.com/go-logfmt/logfmt v0.6.0 ## explicit; go 1.17 github.com/go-logfmt/logfmt -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 +# github.com/go-logr/logr v1.3.0 +## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr +github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -364,6 +369,7 @@ github.com/go-openapi/loads # github.com/go-openapi/runtime v0.26.0 ## explicit; go 1.18 github.com/go-openapi/runtime +github.com/go-openapi/runtime/client github.com/go-openapi/runtime/flagext github.com/go-openapi/runtime/logger github.com/go-openapi/runtime/middleware @@ -371,10 +377,11 @@ github.com/go-openapi/runtime/middleware/denco github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security +github.com/go-openapi/runtime/yamlpc # github.com/go-openapi/spec v0.20.9 => github.com/go-openapi/spec v0.20.6 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.21.7 +# github.com/go-openapi/strfmt v0.21.9 ## explicit; go 1.19 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.4 @@ -450,7 +457,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 +# github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/s2a-go v0.1.7 @@ -512,7 +519,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2/util/metautils github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.25.1 +# github.com/hashicorp/consul/api v1.26.1 ## explicit; go 1.19 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 @@ -539,6 +546,9 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.6 ## explicit; go 1.19 github.com/hashicorp/go-sockaddr +# github.com/hashicorp/go-version v1.6.0 +## explicit +github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.6.0 ## explicit; go 1.12 github.com/hashicorp/golang-lru @@ -571,7 +581,7 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.17.3 +# github.com/klauspost/compress v1.17.4 ## explicit; go 1.19 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -622,7 +632,10 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 ## explicit; go 1.19 github.com/matttproud/golang_protobuf_extensions/v2/pbutil -# github.com/miekg/dns v1.1.56 +# github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a +## explicit; go 1.14 +github.com/metalmatze/signal/server/signalhttp +# github.com/miekg/dns v1.1.57 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 @@ 
-695,12 +708,21 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/prometheus-community/prom-label-proxy v0.7.0 +## explicit; go 1.19 +github.com/prometheus-community/prom-label-proxy/injectproxy # github.com/prometheus/alertmanager v0.26.0 ## explicit; go 1.18 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics github.com/prometheus/alertmanager/api/v1 github.com/prometheus/alertmanager/api/v2 +github.com/prometheus/alertmanager/api/v2/client +github.com/prometheus/alertmanager/api/v2/client/alert +github.com/prometheus/alertmanager/api/v2/client/alertgroup +github.com/prometheus/alertmanager/api/v2/client/general +github.com/prometheus/alertmanager/api/v2/client/receiver +github.com/prometheus/alertmanager/api/v2/client/silence github.com/prometheus/alertmanager/api/v2/models github.com/prometheus/alertmanager/api/v2/restapi github.com/prometheus/alertmanager/api/v2/restapi/operations @@ -741,7 +763,7 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.17.0 +# github.com/prometheus/client_golang v1.18.0 ## explicit; go 1.19 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -753,10 +775,11 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/push github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint +github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.5.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.45.0 +# github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 ## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -770,12 +793,12 @@ github.com/prometheus/common/sigv4 # github.com/prometheus/exporter-toolkit v0.10.0 ## explicit; go 1.18 github.com/prometheus/exporter-toolkit/web -# github.com/prometheus/procfs v0.11.1 +# github.com/prometheus/procfs v0.12.0 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11 +# github.com/prometheus/prometheus v0.48.1-0.20240115084306-17920623e7cd ## explicit; go 1.20 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -888,8 +911,8 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing/opentracing -# github.com/thanos-io/promql-engine v0.0.0-20231127105941-257543af55e8 -## explicit; go 1.19 +# github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856 +## explicit; go 1.20 github.com/thanos-io/promql-engine/api github.com/thanos-io/promql-engine/engine github.com/thanos-io/promql-engine/execution @@ -909,13 +932,15 @@ github.com/thanos-io/promql-engine/execution/warnings github.com/thanos-io/promql-engine/extlabels github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/query -# github.com/thanos-io/thanos v0.32.5-0.20231212162152-fc1a6edf7acc 
+github.com/thanos-io/promql-engine/ringbuffer +# github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d ## explicit; go 1.21 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader github.com/thanos-io/thanos/pkg/block/metadata github.com/thanos-io/thanos/pkg/cache github.com/thanos-io/thanos/pkg/cacheutil +github.com/thanos-io/thanos/pkg/clientconfig github.com/thanos-io/thanos/pkg/compact github.com/thanos-io/thanos/pkg/compact/downsample github.com/thanos-io/thanos/pkg/component @@ -932,7 +957,6 @@ github.com/thanos-io/thanos/pkg/extkingpin github.com/thanos-io/thanos/pkg/extprom github.com/thanos-io/thanos/pkg/extprom/http github.com/thanos-io/thanos/pkg/gate -github.com/thanos-io/thanos/pkg/httpconfig github.com/thanos-io/thanos/pkg/info/infopb github.com/thanos-io/thanos/pkg/metadata/metadatapb github.com/thanos-io/thanos/pkg/model @@ -1035,7 +1059,7 @@ go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.mongodb.org/mongo-driver v1.12.0 +# go.mongodb.org/mongo-driver v1.13.1 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -1063,10 +1087,10 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/featuregate v0.77.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/featuregate v1.0.0 +## explicit; go 1.20 go.opentelemetry.io/collector/featuregate -# go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 +# go.opentelemetry.io/collector/pdata v1.0.0 ## explicit; go 1.20 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1083,11 +1107,11 @@ go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.88.0 +# go.opentelemetry.io/collector/semconv v0.90.1 ## explicit; go 1.20 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 -## explicit; go 1.19 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 +## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil # go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 @@ -1105,7 +1129,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.19.0 +# go.opentelemetry.io/otel v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1116,28 +1140,30 @@ go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal +go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.21.0 -# go.opentelemetry.io/otel/bridge/opentracing v1.19.0 +# go.opentelemetry.io/otel/bridge/opentracing v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.19.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.19.0 +# go.opentelemetry.io/otel/metric v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/sdk v1.19.0 +# go.opentelemetry.io/otel/sdk v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -1146,9 +1172,11 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.19.0 +# go.opentelemetry.io/otel/trace v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded +go.opentelemetry.io/otel/trace/noop # go.opentelemetry.io/proto/otlp v1.0.0 ## explicit; go 1.17 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -1158,8 +1186,8 @@ go.opentelemetry.io/proto/otlp/trace/v1 # go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic -# go.uber.org/goleak v1.2.1 -## explicit; go 1.18 +# go.uber.org/goleak v1.3.0 +## explicit; go 1.20 go.uber.org/goleak go.uber.org/goleak/internal/stack # go.uber.org/multierr v1.11.0 @@ -1195,14 +1223,14 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa +# golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices # golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.18.0 +# golang.org/x/net v0.19.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1220,7 +1248,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.14.0 +# golang.org/x/oauth2 v0.15.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1239,7 +1267,6 @@ golang.org/x/sync/semaphore # golang.org/x/sys v0.15.0 ## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry @@ -1256,10 +1283,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.4.0 +# golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.15.0 +# golang.org/x/tools v0.16.0 ## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver @@ -1277,6 +1304,7 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams 
 golang.org/x/tools/internal/typesinternal
+golang.org/x/tools/internal/versions
 # golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
 ## explicit; go 1.18
 golang.org/x/xerrors
@@ -1286,7 +1314,7 @@ golang.org/x/xerrors/internal
 gonum.org/v1/gonum/floats
 gonum.org/v1/gonum/floats/scalar
 gonum.org/v1/gonum/internal/asm/f64
-# google.golang.org/api v0.150.0
+# google.golang.org/api v0.153.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -1316,17 +1344,17 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
@@ -1451,6 +1479,25 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
+# k8s.io/apimachinery v0.28.4
+## explicit; go 1.20
+k8s.io/apimachinery/pkg/util/runtime
+# k8s.io/client-go v0.28.4
+## explicit; go 1.20
+k8s.io/client-go/tools/metrics
+k8s.io/client-go/util/workqueue
+# k8s.io/klog/v2 v2.110.1
+## explicit; go 1.13
+k8s.io/klog/v2
+k8s.io/klog/v2/internal/buffer
+k8s.io/klog/v2/internal/clock
+k8s.io/klog/v2/internal/dbg
+k8s.io/klog/v2/internal/serialize
+k8s.io/klog/v2/internal/severity
+k8s.io/klog/v2/internal/sloghandler
+# k8s.io/utils v0.0.0-20230711102312-30195339c3c7
+## explicit; go 1.18
+k8s.io/utils/clock
 # sigs.k8s.io/yaml v1.3.0
 ## explicit; go 1.12
 sigs.k8s.io/yaml

From 289c71e45548c4a344ba4eaeabe942132e82c240 Mon Sep 17 00:00:00 2001
From: Alex Le
Date: Tue, 16 Jan 2024 10:51:12 -0800
Subject: [PATCH 2/3] Compactor should skip retry without failure if there is
 permission denied error (#5727)

* Compactor should skip retry without failure if there is permission denied error

Signed-off-by: Alex Le

* factored code

Signed-off-by: Alex Le

---------

Signed-off-by: Alex Le
---
 pkg/compactor/compactor.go                   |  34 +++++++
 pkg/compactor/compactor_test.go              | 100 +++++++++++++++++++
 pkg/storage/bucket/client_mock.go            |   4 +-
 pkg/storage/bucket/sse_bucket_client_test.go |   2 +-
 4 files changed, 137 insertions(+), 3 deletions(-)

diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index 30b84c2197..35ab450c7a 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -22,7 +22,10 @@ import (
 	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/compact"
 	"github.com/thanos-io/thanos/pkg/compact/downsample"
+	"github.com/thanos-io/thanos/pkg/errutil"
 	"github.com/thanos-io/thanos/pkg/extprom"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 
 	"github.com/cortexproject/cortex/pkg/ring"
 	"github.com/cortexproject/cortex/pkg/storage/bucket"
@@ -789,6 +792,10 @@ func (c *Compactor) compactUserWithRetries(ctx context.Context, userID string) e
 		if lastErr == nil {
 			return nil
 		}
+		if c.isCausedByPermissionDenied(lastErr) {
+			level.Warn(c.logger).Log("msg", "skipping compactUser due to PermissionDenied", "user", userID, "err", lastErr)
+			return nil
+		}
 		retries.Wait()
 	}
 
@@ -1024,3 +1031,30 @@ func (c *Compactor) listTenantsWithMetaSyncDirectories() map[string]struct{} {
 
 	return result
 }
+
+func (c *Compactor) isCausedByPermissionDenied(err error) bool {
+	cause := errors.Cause(err)
+	if compact.IsRetryError(cause) || compact.IsHaltError(cause) {
+		cause = errors.Unwrap(cause)
+	}
+	if multiErr, ok := cause.(errutil.NonNilMultiRootError); ok {
+		for _, err := range multiErr {
+			if c.isPermissionDeniedErr(err) {
+				return true
+			}
+		}
+		return false
+	}
+	return c.isPermissionDeniedErr(cause)
+}
+
+func (c *Compactor) isPermissionDeniedErr(err error) bool {
+	if c.bucketClient.IsAccessDeniedErr(err) {
+		return true
+	}
+	s, ok := status.FromError(err)
+	if !ok {
+		return false
+	}
+	return s.Code() == codes.PermissionDenied
+}
diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go
index 3a22981379..989bcf9c43 100644
--- a/pkg/compactor/compactor_test.go
+++ b/pkg/compactor/compactor_test.go
@@ -2028,3 +2028,103 @@ func TestCompactor_ShouldNotTreatInterruptionsAsErrors(t *testing.T) {
 	require.Contains(t, lines, `level=info component=compactor msg="interrupting compaction of user blocks" user=user-1`)
 	require.NotContains(t, logs.String(), `level=error`)
 }
+
+func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSync(t *testing.T) {
+	t.Parallel()
+
+	ss := bucketindex.Status{Status: bucketindex.Ok, Version: bucketindex.SyncStatusFileVersion}
+	content, err := json.Marshal(ss)
+	require.NoError(t, err)
+
+	bucketClient := &bucket.ClientMock{}
+	bucketClient.MockIter("__markers__", []string{}, nil)
+	bucketClient.MockIter("", []string{"user-1"}, nil)
+	bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil)
+	bucketClient.MockIter("user-1/markers/", nil, nil)
+	bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil)
+	bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil)
+	bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil)
+	bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil)
+	bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", bucket.ErrKeyPermissionDenied)
+	bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil)
+	bucketClient.MockGet("user-1/bucket-index-sync-status.json", string(content), nil)
+	bucketClient.MockUpload("user-1/bucket-index.json.gz", nil)
+	bucketClient.MockUpload("user-1/bucket-index-sync-status.json", nil)
+
+	ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
+	t.Cleanup(func() { assert.NoError(t, closer.Close()) })
+
+	cfg := prepareConfig()
+	cfg.ShardingEnabled = true
+	cfg.ShardingRing.InstanceID = "compactor-1"
+	cfg.ShardingRing.InstanceAddr = "1.2.3.4"
+	cfg.ShardingRing.KVStore.Mock = ringStore
+
+	c, _, tsdbPlanner, _, _ := prepare(t, cfg, bucketClient, nil)
+	tsdbPlanner.On("Plan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil)
+
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), c))
+
+	// Wait until a run has completed.
+	cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} {
+		return prom_testutil.ToFloat64(c.compactionRunsCompleted)
+	})
+
+	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c))
+}
+
+func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFromBucket(t *testing.T) {
+	t.Parallel()
+
+	ss := bucketindex.Status{Status: bucketindex.Ok, Version: bucketindex.SyncStatusFileVersion}
+	content, err := json.Marshal(ss)
+	require.NoError(t, err)
+
+	bucketClient := &bucket.ClientMock{}
+	bucketClient.MockIter("__markers__", []string{}, nil)
+	bucketClient.MockIter("", []string{"user-1"}, nil)
+	bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil)
+	bucketClient.MockIter("user-1/markers/", nil, nil)
+	bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil)
+	bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil)
+	bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil)
+	bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil)
+	bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil)
+	bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil)
+	bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil)
+	bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil)
+	bucketClient.MockGet("user-1/bucket-index-sync-status.json", string(content), nil)
+	bucketClient.MockUpload("user-1/bucket-index.json.gz", nil)
+	bucketClient.MockUpload("user-1/bucket-index-sync-status.json", nil)
+
+	ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
+	t.Cleanup(func() { assert.NoError(t, closer.Close()) })
+
+	cfg := prepareConfig()
+	cfg.ShardingEnabled = true
+	cfg.ShardingRing.InstanceID = "compactor-1"
+	cfg.ShardingRing.InstanceAddr = "1.2.3.4"
+	cfg.ShardingRing.KVStore.Mock = ringStore
+
+	c, _, tsdbPlanner, _, _ := prepare(t, cfg, bucketClient, nil)
+	tsdbPlanner.On("Plan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, bucket.ErrKeyPermissionDenied)
+
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), c))
+
+	// Wait until a run has completed.
+	cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} {
+		return prom_testutil.ToFloat64(c.compactionRunsCompleted)
+	})
+
+	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c))
+}
diff --git a/pkg/storage/bucket/client_mock.go b/pkg/storage/bucket/client_mock.go
index ac96370aa9..c5eebbd3f4 100644
--- a/pkg/storage/bucket/client_mock.go
+++ b/pkg/storage/bucket/client_mock.go
@@ -14,7 +14,7 @@ import (
 
 var (
 	errObjectDoesNotExist = errors.New("object does not exist")
-	errKeyPermissionDenied = errors.New("object key permission denied")
+	ErrKeyPermissionDenied = errors.New("object key permission denied")
 )
 
 // ClientMock mocks objstore.Bucket
@@ -188,7 +188,7 @@ func (m *ClientMock) IsObjNotFoundErr(err error) bool {
 
 // IsAccessDeniedErr mocks objstore.Bucket.IsAccessDeniedErr()
 func (m *ClientMock) IsAccessDeniedErr(err error) bool {
-	return err == errKeyPermissionDenied
+	return err == ErrKeyPermissionDenied
 }
 
 // ObjectSize mocks objstore.Bucket.Attributes()
diff --git a/pkg/storage/bucket/sse_bucket_client_test.go b/pkg/storage/bucket/sse_bucket_client_test.go
index 91f5ba21e0..1b2815982c 100644
--- a/pkg/storage/bucket/sse_bucket_client_test.go
+++ b/pkg/storage/bucket/sse_bucket_client_test.go
@@ -111,7 +111,7 @@ func Test_shouldWrapSSeErrors(t *testing.T) {
 
 	bkt := &ClientMock{}
 
-	bkt.MockGet("Test", "someContent", errKeyPermissionDenied)
+	bkt.MockGet("Test", "someContent", ErrKeyPermissionDenied)
 
 	sseBkt := NewSSEBucketClient("user-1", bkt, cfgProvider)

From 55321520be033e72897bcd4ce0b2779080e402da Mon Sep 17 00:00:00 2001
From: Daniel Blando
Date: Tue, 16 Jan 2024 13:14:18 -0800
Subject: [PATCH 3/3] Update thanos/objstore (#5729)

Signed-off-by: Daniel Deluiggi
---
 go.mod                                         |  2 +-
 go.sum                                         |  4 ++--
 .../thanos-io/objstore/CHANGELOG.md            |  1 +
 .../github.com/thanos-io/objstore/objstore.go  | 23 ++++++++++++++++++-
 vendor/modules.txt                             |  2 +-
 5 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index 518e7e7ee2..64ad8c2a54 100644
--- a/go.mod
+++ b/go.mod
@@ -51,7 +51,7 @@ require (
 	github.com/sony/gobreaker v0.5.0
 	github.com/spf13/afero v1.9.5
 	github.com/stretchr/testify v1.8.4
-	github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d
+	github.com/thanos-io/objstore v0.0.0-20240116185442-6ecabdddaab1
 	github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856
 	github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
diff --git a/go.sum b/go.sum
index 9f853588ea..fc5feb2ac2 100644
--- a/go.sum
+++ b/go.sum
@@ -1451,8 +1451,8 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4
 github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng=
 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM=
-github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d h1:rTz1FIWYPS4R7H/hdZPaVRfevHdfv7BOjJ2FQG747KQ=
-github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d/go.mod h1:RMvJQnpB4QQiYGg1gF8mnPJg6IkIPY28Buh8f6b+F0c=
+github.com/thanos-io/objstore v0.0.0-20240116185442-6ecabdddaab1 h1:8taDMdkqtKO6uB+JKwXnz2jByLwBLY+GnXp3ShXjUKY=
+github.com/thanos-io/objstore v0.0.0-20240116185442-6ecabdddaab1/go.mod h1:RMvJQnpB4QQiYGg1gF8mnPJg6IkIPY28Buh8f6b+F0c=
 github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856 h1:4PxgeNqMIWScmt8+oOjG6puF8Dhn2rPGOFBwuIf24SI=
 github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856/go.mod h1:3pmodeI6v0zeezI1m9dE0ZaUXqiNSceZj1ZrQIXvHE4=
 github.com/thanos-io/thanos v0.33.1-0.20240115194623-324846f66d5d h1:Y6Jmemwwascqa7nxSKjYF+yaSDtOVmYZ/cENTZAteuc=
diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md
index 6d6aa563bb..90b29a8869 100644
--- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md
+++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md
@@ -38,6 +38,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#85](https://github.com/thanos-io/objstore/pull/85) S3: Allow checksum algorithm to be configured
 - [#92](https://github.com/thanos-io/objstore/pull/92) GCS: Allow using a gRPC client.
 - [#94](https://github.com/thanos-io/objstore/pull/94) Allow timingReadCloser to be seeker
+- [#96](https://github.com/thanos-io/objstore/pull/96) Allow nopCloserWithObjectSize to be seeker
 
 ### Changed
 - [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`.
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go
index df43035f29..a83dacdf95 100644
--- a/vendor/github.com/thanos-io/objstore/objstore.go
+++ b/vendor/github.com/thanos-io/objstore/objstore.go
@@ -232,6 +232,19 @@ func NopCloserWithSize(r io.Reader) io.ReadCloser {
 	return nopCloserWithObjectSize{r}
 }
 
+type nopSeekerCloserWithObjectSize struct{ io.Reader }
+
+func (nopSeekerCloserWithObjectSize) Close() error                 { return nil }
+func (n nopSeekerCloserWithObjectSize) ObjectSize() (int64, error) { return TryToGetSize(n.Reader) }
+
+func (n nopSeekerCloserWithObjectSize) Seek(offset int64, whence int) (int64, error) {
+	return n.Reader.(io.Seeker).Seek(offset, whence)
+}
+
+func nopSeekerCloserWithSize(r io.Reader) io.ReadSeekCloser {
+	return nopSeekerCloserWithObjectSize{r}
+}
+
 // UploadDir uploads all files in srcdir to the bucket with into a top-level directory
 // named dstdir. It is a caller responsibility to clean partial upload in case of failure.
 func UploadDir(ctx context.Context, logger log.Logger, bkt Bucket, srcdir, dstdir string, options ...UploadOption) error {
@@ -595,8 +608,16 @@ func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) err
 	const op = OpUpload
 	b.ops.WithLabelValues(op).Inc()
 
+	_, ok := r.(io.Seeker)
+	var nopR io.ReadCloser
+	if ok {
+		nopR = nopSeekerCloserWithSize(r)
+	} else {
+		nopR = NopCloserWithSize(r)
+	}
+
 	trc := newTimingReadCloser(
-		NopCloserWithSize(r),
+		nopR,
 		op,
 		b.opsDuration,
 		b.opsFailures,
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ac9fe0d6eb..26bb3a8ad4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -901,7 +901,7 @@ github.com/stretchr/objx
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
-# github.com/thanos-io/objstore v0.0.0-20231231041903-61cfed8cbb9d
+# github.com/thanos-io/objstore v0.0.0-20240116185442-6ecabdddaab1
 ## explicit; go 1.20
 github.com/thanos-io/objstore
 github.com/thanos-io/objstore/exthttp
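
Note on PATCH 2/3: the classification it adds boils down to two checks: unwrap the failure (including each entry of a Thanos multi-error) and treat a gRPC PermissionDenied status, or a bucket-level access-denied error, as non-retryable. The standalone sketch below illustrates only the gRPC part and is not the Cortex code itself: anyPermissionDenied and the plain []error slice are hypothetical stand-ins for the patch's walk over errutil.NonNilMultiRootError, and the real implementation additionally consults bucketClient.IsAccessDeniedErr and first unwraps Thanos retry/halt errors.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isPermissionDenied reports whether err carries the gRPC PermissionDenied
// code. status.FromError only recognizes gRPC status errors, so any other
// error falls through to false.
func isPermissionDenied(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		return false
	}
	return s.Code() == codes.PermissionDenied
}

// anyPermissionDenied scans a flattened multi-error the way the patch walks
// a Thanos multi-error: one denied underlying error is enough to classify
// the whole compaction failure as a permission problem.
func anyPermissionDenied(errs []error) bool {
	for _, err := range errs {
		if isPermissionDenied(err) {
			return true
		}
	}
	return false
}

func main() {
	denied := status.Error(codes.PermissionDenied, "s3: access denied")
	transient := errors.New("connection reset by peer")

	fmt.Println(isPermissionDenied(denied))                      // true
	fmt.Println(anyPermissionDenied([]error{transient, denied})) // true
	fmt.Println(anyPermissionDenied([]error{transient}))         // false
}

This classification is what lets compactUserWithRetries return nil early instead of burning its remaining retries on an error that can never succeed, which is exactly the behavior the two new tests assert against the mocked bucket.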
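Note on PATCH 3/3: the objstore change exists because wrapping an upload reader in a plain nop-closer hides the reader's Seek method, and some storage providers rely on io.Seeker to rewind and retry a failed upload. Below is a minimal sketch of that pattern under illustrative, hypothetical names (nopCloser, nopSeekCloser, wrap); it is not the vendored objstore code.

package main

import (
	"fmt"
	"io"
	"strings"
)

// nopCloser is the naive wrapper: it satisfies io.ReadCloser but hides
// any Seek method the wrapped reader may have.
type nopCloser struct{ io.Reader }

func (nopCloser) Close() error { return nil }

// nopSeekCloser keeps Seek visible by satisfying io.ReadSeekCloser.
type nopSeekCloser struct{ io.Reader }

func (nopSeekCloser) Close() error { return nil }
func (n nopSeekCloser) Seek(offset int64, whence int) (int64, error) {
	// Safe only because wrap checks for io.Seeker before using this type.
	return n.Reader.(io.Seeker).Seek(offset, whence)
}

// wrap picks the seekable wrapper only when the source reader can seek,
// mirroring the type assertion the patch adds to metricBucket.Upload.
func wrap(r io.Reader) io.ReadCloser {
	if _, ok := r.(io.Seeker); ok {
		return nopSeekCloser{r}
	}
	return nopCloser{r}
}

func main() {
	r := wrap(strings.NewReader("payload"))
	_, canSeek := r.(io.Seeker)
	fmt.Println(canSeek) // true: the Seek method survives the wrapping
}

The up-front type assertion is the whole trick: the seekable wrapper is returned only when the underlying reader actually implements io.Seeker, so the assertion inside Seek can never panic, while non-seekable readers keep the old nop-closer behavior.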