diff --git a/.busybox-versions b/.busybox-versions index b925f4994f..6e255e7e70 100644 --- a/.busybox-versions +++ b/.busybox-versions @@ -1,7 +1,7 @@ # Auto generated by busybox-updater.sh. DO NOT EDIT -amd64=393d14abb68b8b2d88304c72ac25b5ce130aa3a1d57ba7363e2c4d07d294513d -arm64=9fe410fe5b8f283d057939a5b0a6f464ecb4bfe4a07d132d2846cfbe82cf43ea -arm=a237b18458d6bcc8964e59ced627ea46eb9aae68875ea833c61d5050a742e624 -ppc64le=cbb9892625fd0d4c625afe8255fe35699a163bc4d74925dfcca74ee7cc43d4ba -riscv64=fa1350d80e4481d3671d808fbe239e4075205f69c940e7e85711bdc39bf8e181 -s390x=1e3e5a05847ad67da2b148d952931cf6f716a334ab06ea00742560a2ff985c7d +amd64=f173c44fab35484fa0e940e42929efe2a2f506feda431ba72c5f0d79639d7f55 +arm64=6277ab6abe348994989b3959d7c125d7a487012aedb80570ec28652a012c69d6 +arm=31533906c9eadc190de436bcbc021207d90839777c1b95991edd15e7df5d34ad +ppc64le=ace2ea29bf8e4267c293ed5570df249039a0bc3949d3371429a71cf114e8a9e2 +riscv64=ff38cae5b5ed16251631e55156a7d92977bf5a8c4714ff6e3c333f7acb0297e0 +s390x=59d0ed3060aef57d1b23bc353a2223af24a6e1d035486647eb599a77ff2d446e diff --git a/.mdox.validate.yaml b/.mdox.validate.yaml index 73632ba748..94355e319e 100644 --- a/.mdox.validate.yaml +++ b/.mdox.validate.yaml @@ -40,3 +40,5 @@ validators: type: 'ignore' - regex: 'krisztianfekete\.org' type: 'ignore' + - regex: 'twitter\.com' + type: 'ignore' diff --git a/.mdox.yaml b/.mdox.yaml index 4f8a0be006..1c9c4dd589 100644 --- a/.mdox.yaml +++ b/.mdox.yaml @@ -72,6 +72,10 @@ transformations: backMatter: *docBackMatter # Non-versioned element: Blog. 
+ + - glob: "support/*" + path: /../support/* + - glob: "blog/*" path: /../blog/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f6a09133a..a0bccbbaf6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,9 +12,13 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Fixed +- [#7011](https://github.com/thanos-io/thanos/pull/7011) Query Frontend: queries with negative offset should check whether it is cacheable or not. - [#6874](https://github.com/thanos-io/thanos/pull/6874) Sidecar: fix labels returned by 'api/v1/series' in presence of conflicting external and inner labels. +- [#7009](https://github.com/thanos-io/thanos/pull/7009) Rule: Fix spacing error in URL. ### Added + +- [#6756](https://github.com/thanos-io/thanos/pull/6756) Query: Add `query.enable-tenancy` & `query.tenant-label-name` options to allow enforcement of tenancy on the query path, by injecting labels into queries (uses prom-label-proxy internally). - [#6944](https://github.com/thanos-io/thanos/pull/6944) Receive: Added a new flag for maximum retention bytes. - [#6891](https://github.com/thanos-io/thanos/pull/6891) Objstore: Bump `objstore` which adds support for Azure Workload Identity. - [#6453](https://github.com/thanos-io/thanos/pull/6453) Sidecar: Added `--reloader.method` to support configuration reloads via SIHUP signal. @@ -22,22 +26,26 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#6954](https://github.com/thanos-io/thanos/pull/6954) Index Cache: Support tracing for fetch APIs. - [#6943](https://github.com/thanos-io/thanos/pull/6943) Ruler: Added `keep_firing_for` field in alerting rule. - [#6972](https://github.com/thanos-io/thanos/pull/6972) Store Gateway: Apply series limit when streaming series for series actually matched if lazy postings is enabled. 
+- [#6984](https://github.com/thanos-io/thanos/pull/6984) Store Gateway: Added `--store.index-header-lazy-download-strategy` to specify how to lazily download index headers when lazy mmap is enabled. +- [#6887](https://github.com/thanos-io/thanos/pull/6887) Query Frontend: *breaking :warning:* Add tenant label to relevant exported metrics. Note that this change may cause some pre-existing custom dashboard queries to be incorrect due to the added label. +- [#7028](https://github.com/thanos-io/thanos/pull/7028) Query|Query Frontend: Add new `--query-frontend.enable-x-functions` flag to enable experimental extended functions. ### Changed ### Removed -## [v0.33.0](https://github.com/thanos-io/thanos/tree/release-0.33) - in progress +## [v0.33.0](https://github.com/thanos-io/thanos/tree/release-0.33) - 18.12.2023 ### Fixed - [#6817](https://github.com/thanos-io/thanos/pull/6817) Store Gateway: fix `matchersToPostingGroups` label values variable got shadowed bug. ### Added - +- [#6891](https://github.com/thanos-io/thanos/pull/6891) Objstore: Bump `objstore` which adds support for Azure Workload Identity. - [#6605](https://github.com/thanos-io/thanos/pull/6605) Query Frontend: Support vertical sharding binary expression with metric name when no matching labels specified. - [#6308](https://github.com/thanos-io/thanos/pull/6308) Ruler: Support configuration flag that allows customizing template for alert message. - [#6760](https://github.com/thanos-io/thanos/pull/6760) Query Frontend: Added TLS support in `--query-frontend.downstream-tripper-config` and `--query-frontend.downstream-tripper-config-file` +- [#7004](https://github.com/thanos-io/thanos/pull/7004) Query Frontend: Support documented auto discovery for memcached - [#6749](https://github.com/thanos-io/thanos/pull/6749) Store Gateway: Added `thanos_store_index_cache_fetch_duration_seconds` histogram for tracking latency of fetching data from index cache. 
- [#6690](https://github.com/thanos-io/thanos/pull/6690) Store: *breaking :warning:* Add tenant label to relevant exported metrics. Note that this change may cause some pre-existing dashboard queries to be incorrect due to the added label. - [#6530](https://github.com/thanos-io/thanos/pull/6530) / [#6690](https://github.com/thanos-io/thanos/pull/6690) Query: Add command line arguments for configuring tenants and forward tenant information to Store Gateway. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index ec38a62d50..9164f720a5 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -2,7 +2,7 @@ | Name | Email | Slack | GitHub | Company | |-----------------------|---------------------------|--------------------------|----------------------------------------------------|---------------------| -| Bartłomiej Płotka | bwplotka@gmail.com | `@bwplotka` | [@bwplotka](https://github.com/bwplotka) | Red Hat | +| Bartłomiej Płotka | bwplotka@gmail.com | `@bwplotka` | [@bwplotka](https://github.com/bwplotka) | Google | | Frederic Branczyk | fbranczyk@gmail.com | `@brancz` | [@brancz](https://github.com/brancz) | Polar Signals | | Giedrius Statkevičius | giedriuswork@gmail.com | `@Giedrius Statkevičius` | [@GiedriusS](https://github.com/GiedriusS) | Vinted | | Kemal Akkoyun | kakkoyun@gmail.com | `@kakkoyun` | [@kakkoyun](https://github.com/kakkoyun) | Polar Signals | diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index f7a76cc0bd..4d831ab6d1 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -102,7 +102,7 @@ func registerQuery(app *extkingpin.App) { defaultEngine := cmd.Flag("query.promql-engine", "Default PromQL engine to use.").Default(string(apiv1.PromqlEnginePrometheus)). Enum(string(apiv1.PromqlEnginePrometheus), string(apiv1.PromqlEngineThanos)) - + extendedFunctionsEnabled := cmd.Flag("query.enable-x-functions", "Whether to enable extended rate functions (xrate, xincrease and xdelta). 
Only has effect when used with Thanos engine.").Default("false").Bool() promqlQueryMode := cmd.Flag("query.mode", "PromQL query mode. One of: local, distributed."). Hidden(). Default(string(queryModeLocal)). @@ -220,6 +220,8 @@ func registerQuery(app *extkingpin.App) { tenantHeader := cmd.Flag("query.tenant-header", "HTTP header to determine tenant.").Default(tenancy.DefaultTenantHeader).String() defaultTenant := cmd.Flag("query.default-tenant-id", "Default tenant ID to use if tenant header is not present").Default(tenancy.DefaultTenant).String() tenantCertField := cmd.Flag("query.tenant-certificate-field", "Use TLS client's certificate field to determine tenant for write requests. Must be one of "+tenancy.CertificateFieldOrganization+", "+tenancy.CertificateFieldOrganizationalUnit+" or "+tenancy.CertificateFieldCommonName+". This setting will cause the query.tenant-header flag value to be ignored.").Default("").Enum("", tenancy.CertificateFieldOrganization, tenancy.CertificateFieldOrganizationalUnit, tenancy.CertificateFieldCommonName) + enforceTenancy := cmd.Flag("query.enforce-tenancy", "Enforce tenancy on Query APIs. 
Responses are returned only if the label value of the configured tenant-label-name and the value of the tenant header matches.").Default("false").Bool() + tenantLabel := cmd.Flag("query.tenant-label-name", "Label name to use when enforcing tenancy (if --query.enforce-tenancy is enabled).").Default(tenancy.DefaultTenantLabel).String() var storeRateLimits store.SeriesSelectLimits storeRateLimits.RegisterFlags(cmd) @@ -259,7 +261,10 @@ func registerQuery(app *extkingpin.App) { Files: *fileSDFiles, RefreshInterval: *fileSDInterval, } - fileSD = file.NewDiscovery(conf, logger) + var err error + if fileSD, err = file.NewDiscovery(conf, logger, reg); err != nil { + return err + } } if *webRoutePrefix == "" { @@ -339,10 +344,13 @@ func registerQuery(app *extkingpin.App) { *queryTelemetrySeriesQuantiles, *defaultEngine, storeRateLimits, + *extendedFunctionsEnabled, queryMode(*promqlQueryMode), *tenantHeader, *defaultTenant, *tenantCertField, + *enforceTenancy, + *tenantLabel, ) }) } @@ -418,10 +426,13 @@ func runQuery( queryTelemetrySeriesQuantiles []float64, defaultEngine string, storeRateLimits store.SeriesSelectLimits, + extendedFunctionsEnabled bool, queryMode queryMode, tenantHeader string, defaultTenant string, tenantCertField string, + enforceTenancy bool, + tenantLabel string, ) error { if alertQueryURL == "" { lastColon := strings.LastIndex(httpBindAddr, ":") @@ -499,56 +510,29 @@ func runQuery( } var ( - endpoints = query.NewEndpointSet( - time.Now, + endpoints = prepareEndpointSet( + g, logger, reg, - func() (specs []*query.GRPCEndpointSpec) { - // Add strict & static nodes. 
- for _, addr := range strictStores { - specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) - } - - for _, addr := range strictEndpoints { - specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) - } - - for _, dnsProvider := range []*dns.Provider{ - dnsStoreProvider, - dnsRuleProvider, - dnsExemplarProvider, - dnsMetadataProvider, - dnsTargetProvider, - dnsEndpointProvider, - } { - var tmpSpecs []*query.GRPCEndpointSpec - - for _, addr := range dnsProvider.Addresses() { - tmpSpecs = append(tmpSpecs, query.NewGRPCEndpointSpec(addr, false)) - } - tmpSpecs = removeDuplicateEndpointSpecs(logger, duplicatedStores, tmpSpecs) - specs = append(specs, tmpSpecs...) - } - - for _, eg := range endpointGroupAddrs { - addr := fmt.Sprintf("dns:///%s", eg) - spec := query.NewGRPCEndpointSpec(addr, false, extgrpc.EndpointGroupGRPCOpts()...) - specs = append(specs, spec) - } - - for _, eg := range strictEndpointGroups { - addr := fmt.Sprintf("dns:///%s", eg) - spec := query.NewGRPCEndpointSpec(addr, true, extgrpc.EndpointGroupGRPCOpts()...) - specs = append(specs, spec) - } - - return specs + []*dns.Provider{ + dnsStoreProvider, + dnsRuleProvider, + dnsExemplarProvider, + dnsMetadataProvider, + dnsTargetProvider, + dnsEndpointProvider, }, + duplicatedStores, + strictStores, + strictEndpoints, + endpointGroupAddrs, + strictEndpointGroups, dialOpts, unhealthyStoreTimeout, endpointInfoTimeout, queryConnMetricLabels..., ) + proxy = store.NewProxyStore(logger, reg, endpoints.GetStoreClients, component.Query, selectorLset, storeResponseTimeout, store.RetrievalStrategy(grpcProxyStrategy), options...) rulesProxy = rules.NewProxy(logger, endpoints.GetRulesClients) targetsProxy = targets.NewProxy(logger, endpoints.GetTargetsClients) @@ -563,20 +547,6 @@ func runQuery( ) ) - // Periodically update the store set with the addresses we see in our cluster. 
- { - ctx, cancel := context.WithCancel(context.Background()) - g.Add(func() error { - return runutil.Repeat(5*time.Second, ctx.Done(), func() error { - endpoints.Update(ctx) - return nil - }) - }, func(error) { - cancel() - endpoints.Close() - }) - } - // Run File Service Discovery and update the store set when the files are modified. if fileSD != nil { var fileSDUpdates chan []*targetgroup.Group @@ -690,6 +660,7 @@ func runQuery( engineFactory := apiv1.NewQueryEngineFactory( engineOpts, remoteEngineEndpoints, + extendedFunctionsEnabled, ) lookbackDeltaCreator := LookbackDeltaFactory(engineOpts, dynamicLookbackDelta) @@ -759,6 +730,8 @@ func runQuery( tenantHeader, defaultTenant, tenantCertField, + enforceTenancy, + tenantLabel, ) api.Register(router.WithPrefix("/api/v1"), tracer, logger, ins, logMiddleware) @@ -858,6 +831,82 @@ func removeDuplicateEndpointSpecs(logger log.Logger, duplicatedStores prometheus return deduplicated } +func prepareEndpointSet( + g *run.Group, + logger log.Logger, + reg *prometheus.Registry, + dnsProviders []*dns.Provider, + duplicatedStores prometheus.Counter, + strictStores []string, + strictEndpoints []string, + endpointGroupAddrs []string, + strictEndpointGroups []string, + dialOpts []grpc.DialOption, + unhealthyStoreTimeout time.Duration, + endpointInfoTimeout time.Duration, + queryConnMetricLabels ...string, +) *query.EndpointSet { + endpointSet := query.NewEndpointSet( + time.Now, + logger, + reg, + func() (specs []*query.GRPCEndpointSpec) { + // Add strict & static nodes. 
+ for _, addr := range strictStores { + specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) + } + + for _, addr := range strictEndpoints { + specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) + } + + for _, dnsProvider := range dnsProviders { + var tmpSpecs []*query.GRPCEndpointSpec + + for _, addr := range dnsProvider.Addresses() { + tmpSpecs = append(tmpSpecs, query.NewGRPCEndpointSpec(addr, false)) + } + tmpSpecs = removeDuplicateEndpointSpecs(logger, duplicatedStores, tmpSpecs) + specs = append(specs, tmpSpecs...) + } + + for _, eg := range endpointGroupAddrs { + addr := fmt.Sprintf("dns:///%s", eg) + spec := query.NewGRPCEndpointSpec(addr, false, extgrpc.EndpointGroupGRPCOpts()...) + specs = append(specs, spec) + } + + for _, eg := range strictEndpointGroups { + addr := fmt.Sprintf("dns:///%s", eg) + spec := query.NewGRPCEndpointSpec(addr, true, extgrpc.EndpointGroupGRPCOpts()...) + specs = append(specs, spec) + } + + return specs + }, + dialOpts, + unhealthyStoreTimeout, + endpointInfoTimeout, + queryConnMetricLabels..., + ) + + // Periodically update the store set with the addresses we see in our cluster. + { + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + return runutil.Repeat(5*time.Second, ctx.Done(), func() error { + endpointSet.Update(ctx) + return nil + }) + }, func(error) { + cancel() + endpointSet.Close() + }) + } + + return endpointSet +} + // LookbackDeltaFactory creates from 1 to 3 lookback deltas depending on // dynamicLookbackDelta and eo.LookbackDelta and returns a function // that returns appropriate lookback delta for given maxSourceResolutionMillis. 
diff --git a/cmd/thanos/query_frontend.go b/cmd/thanos/query_frontend.go index e96f9b7083..5fa7cf3c5e 100644 --- a/cmd/thanos/query_frontend.go +++ b/cmd/thanos/query_frontend.go @@ -16,6 +16,8 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/promql/parser" + "github.com/thanos-io/promql-engine/execution/parse" "github.com/weaveworks/common/user" "gopkg.in/yaml.v2" @@ -92,6 +94,9 @@ func registerQueryFrontend(app *extkingpin.App) { cmd.Flag("query-range.max-retries-per-request", "Maximum number of retries for a single query range request; beyond this, the downstream error is returned."). Default("5").IntVar(&cfg.QueryRangeConfig.MaxRetries) + cmd.Flag("query-frontend.enable-x-functions", "Enable experimental x- functions in query-frontend. --no-query-frontend.enable-x-functions for disabling."). + Default("false").BoolVar(&cfg.EnableXFunctions) + cmd.Flag("query-range.max-query-length", "Limit the query time range (end - start time) in the query-frontend, 0 disables it."). Default("0").DurationVar((*time.Duration)(&cfg.QueryRangeConfig.Limits.MaxQueryLength)) @@ -285,6 +290,12 @@ func runQueryFrontend( return errors.Wrap(err, "error validating the config") } + if cfg.EnableXFunctions { + for fname, v := range parse.XFunctions { + parser.Functions[fname] = v + } + } + tripperWare, err := queryfrontend.NewTripperware(cfg.Config, reg, logger) if err != nil { return errors.Wrap(err, "setup tripperwares") @@ -322,7 +333,7 @@ func runQueryFrontend( // Configure Request Logging for HTTP calls. logMiddleware := logging.NewHTTPServerMiddleware(logger, httpLogOpts...) - ins := extpromhttp.NewInstrumentationMiddleware(reg, nil) + ins := extpromhttp.NewTenantInstrumentationMiddleware(cfg.TenantHeader, cfg.DefaultTenant, reg, nil) // Start metrics HTTP server. 
{ diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index e8f867347b..fe8e86b139 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -16,6 +16,7 @@ import ( "sort" "strconv" "strings" + texttemplate "text/template" "time" extflag "github.com/efficientgo/tools/extkingpin" @@ -50,18 +51,20 @@ import ( "github.com/thanos-io/thanos/pkg/alert" v1 "github.com/thanos-io/thanos/pkg/api/rule" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/errutil" + "github.com/thanos-io/thanos/pkg/extgrpc" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/extprom" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" + "github.com/thanos-io/thanos/pkg/query" thanosrules "github.com/thanos-io/thanos/pkg/rules" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" @@ -75,14 +78,17 @@ import ( "github.com/thanos-io/thanos/pkg/ui" ) +const dnsSDResolver = "miekgdns" + type ruleConfig struct { http httpConfig grpc grpcConfig web webConfig shipper shipperConfig - query queryConfig - queryConfigYAML []byte + query queryConfig + queryConfigYAML []byte + grpcQueryEndpoints []string alertmgr alertMgrConfig alertmgrsConfigYAML []byte @@ -148,6 +154,9 @@ func registerRule(app *extkingpin.App) { cmd.Flag("restore-ignored-label", "Label names to be ignored when restoring alerts from the remote storage. This is only used in stateless mode."). StringsVar(&conf.ignoredLabelNames) + cmd.Flag("grpc-query-endpoint", "Addresses of Thanos gRPC query API servers (repeatable). 
The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect Thanos API servers through respective DNS lookups."). + PlaceHolder("").StringsVar(&conf.grpcQueryEndpoints) + conf.rwConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write configurations, that specify servers where samples should be sent to (see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.", extflag.WithEnvSubstitution()) conf.objStoreConfig = extkingpin.RegisterCommonObjStoreFlags(cmd, "", false) @@ -193,11 +202,12 @@ func registerRule(app *extkingpin.App) { if err != nil { return err } - if len(conf.query.sdFiles) == 0 && len(conf.query.addrs) == 0 && len(conf.queryConfigYAML) == 0 { - return errors.New("no --query parameter was given") + + if len(conf.query.sdFiles) == 0 && len(conf.query.addrs) == 0 && len(conf.queryConfigYAML) == 0 && len(conf.grpcQueryEndpoints) == 0 { + return errors.New("no query configuration parameter was given") } - if (len(conf.query.sdFiles) != 0 || len(conf.query.addrs) != 0) && len(conf.queryConfigYAML) != 0 { - return errors.New("--query/--query.sd-files and --query.config* parameters cannot be defined at the same time") + if (len(conf.query.sdFiles) != 0 || len(conf.query.addrs) != 0 || len(conf.grpcQueryEndpoints) != 0) && len(conf.queryConfigYAML) != 0 { + return errors.New("--query/--query.sd-files/--grpc-query-endpoint and --query.config* parameters cannot be defined at the same time") } // Parse and check alerting configuration. 
@@ -224,7 +234,8 @@ func registerRule(app *extkingpin.App) { return errors.Wrap(err, "error while parsing config for request logging") } - return runRule(g, + return runRule( + g, logger, reg, tracer, @@ -304,35 +315,43 @@ func runRule( ) error { metrics := newRuleMetrics(reg) - var queryCfg []httpconfig.Config + var queryCfg []clientconfig.Config var err error if len(conf.queryConfigYAML) > 0 { - queryCfg, err = httpconfig.LoadConfigs(conf.queryConfigYAML) + queryCfg, err = clientconfig.LoadConfigs(conf.queryConfigYAML) if err != nil { return err } } else { - queryCfg, err = httpconfig.BuildConfig(conf.query.addrs) + queryCfg, err = clientconfig.BuildConfigFromHTTPAddresses(conf.query.addrs) if err != nil { return errors.Wrap(err, "query configuration") } // Build the query configuration from the legacy query flags. - var fileSDConfigs []httpconfig.FileSDConfig + var fileSDConfigs []clientconfig.HTTPFileSDConfig if len(conf.query.sdFiles) > 0 { - fileSDConfigs = append(fileSDConfigs, httpconfig.FileSDConfig{ + fileSDConfigs = append(fileSDConfigs, clientconfig.HTTPFileSDConfig{ Files: conf.query.sdFiles, RefreshInterval: model.Duration(conf.query.sdInterval), }) queryCfg = append(queryCfg, - httpconfig.Config{ - EndpointsConfig: httpconfig.EndpointsConfig{ - Scheme: "http", - FileSDConfigs: fileSDConfigs, + clientconfig.Config{ + HTTPConfig: clientconfig.HTTPConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ + Scheme: "http", + FileSDConfigs: fileSDConfigs, + }, }, }, ) } + + grpcQueryCfg, err := clientconfig.BuildConfigFromGRPCAddresses(conf.grpcQueryEndpoints) + if err != nil { + return errors.Wrap(err, "query configuration") + } + queryCfg = append(queryCfg, grpcQueryCfg...) 
} if err := validateTemplate(*conf.alertmgr.alertSourceTemplate); err != nil { @@ -345,26 +364,97 @@ func runRule( dns.ResolverType(conf.query.dnsSDResolver), ) var ( - queryClients []*httpconfig.Client - promClients []*promclient.Client + queryClients []*clientconfig.HTTPClient + promClients []*promclient.Client + grpcEndpointSet *query.EndpointSet + grpcEndpoints []string ) + queryClientMetrics := extpromhttp.NewClientMetrics(extprom.WrapRegistererWith(prometheus.Labels{"client": "query"}, reg)) + for _, cfg := range queryCfg { - cfg.HTTPClientConfig.ClientMetrics = queryClientMetrics - c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "query") - if err != nil { - return err + if cfg.HTTPConfig.NotEmpty() { + cfg.HTTPConfig.HTTPClientConfig.ClientMetrics = queryClientMetrics + c, err := clientconfig.NewHTTPClient(cfg.HTTPConfig.HTTPClientConfig, "query") + if err != nil { + return err + } + c.Transport = tracing.HTTPTripperware(logger, c.Transport) + queryClient, err := clientconfig.NewClient(logger, cfg.HTTPConfig.EndpointsConfig, c, queryProvider.Clone()) + if err != nil { + return err + } + queryClients = append(queryClients, queryClient) + promClients = append(promClients, promclient.NewClient(queryClient, logger, "thanos-rule")) + // Discover and resolve query addresses. + addDiscoveryGroups(g, queryClient, conf.query.dnsSDInterval) } - c.Transport = tracing.HTTPTripperware(logger, c.Transport) - queryClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, queryProvider.Clone()) + + if cfg.GRPCConfig != nil { + grpcEndpoints = append(grpcEndpoints, cfg.GRPCConfig.EndpointAddrs...) 
+ } + } + + if len(grpcEndpoints) > 0 { + duplicatedGRPCEndpoints := promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_rule_grpc_endpoints_duplicated_total", + Help: "The number of times a duplicated grpc endpoint is detected from the different configs in rule", + }) + + dnsEndpointProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_rule_grpc_endpoints_", reg), + dnsSDResolver, + ) + + dialOpts, err := extgrpc.StoreClientGRPCOpts( + logger, + reg, + tracer, + false, + false, + "", + "", + "", + "", + ) if err != nil { return err } - queryClients = append(queryClients, queryClient) - promClients = append(promClients, promclient.NewClient(queryClient, logger, "thanos-rule")) - // Discover and resolve query addresses. - addDiscoveryGroups(g, queryClient, conf.query.dnsSDInterval) + + grpcEndpointSet = prepareEndpointSet( + g, + logger, + reg, + []*dns.Provider{dnsEndpointProvider}, + duplicatedGRPCEndpoints, + nil, + nil, + nil, + nil, + dialOpts, + 5*time.Minute, + 5*time.Second, + ) + + // Periodically update the GRPC addresses from query config by resolving them using DNS SD if necessary. 
+ { + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + return runutil.Repeat(5*time.Second, ctx.Done(), func() error { + resolveCtx, resolveCancel := context.WithTimeout(ctx, 5*time.Second) + defer resolveCancel() + if err := dnsEndpointProvider.Resolve(resolveCtx, grpcEndpoints); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses passed using grpc query config", "err", err) + } + return nil + }) + }, func(error) { + cancel() + }) + } } + var ( appendable storage.Appendable queryable storage.Queryable @@ -473,13 +563,13 @@ func runRule( ) for _, cfg := range alertingCfg.Alertmanagers { cfg.HTTPClientConfig.ClientMetrics = amClientMetrics - c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager") + c, err := clientconfig.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager") if err != nil { return err } c.Transport = tracing.HTTPTripperware(logger, c.Transport) // Each Alertmanager client has a different list of targets thus each needs its own DNS provider. - amClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) + amClient, err := clientconfig.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) if err != nil { return err } @@ -538,7 +628,7 @@ func runRule( OutageTolerance: conf.outageTolerance, ForGracePeriod: conf.forGracePeriod, }, - queryFuncCreator(logger, queryClients, promClients, metrics.duplicatedQuery, metrics.ruleEvalWarnings, conf.query.httpMethod, conf.query.doNotAddThanosParams), + queryFuncCreator(logger, queryClients, promClients, grpcEndpointSet, metrics.duplicatedQuery, metrics.ruleEvalWarnings, conf.query.httpMethod, conf.query.doNotAddThanosParams), conf.lset, // In our case the querying URL is the external URL because in Prometheus // --web.external-url points to it i.e. 
it points at something where the user @@ -816,8 +906,9 @@ func labelsTSDBToProm(lset labels.Labels) (res labels.Labels) { func queryFuncCreator( logger log.Logger, - queriers []*httpconfig.Client, + queriers []*clientconfig.HTTPClient, promClients []*promclient.Client, + grpcEndpointSet *query.EndpointSet, duplicatedQuery prometheus.Counter, ruleEvalWarnings *prometheus.CounterVec, httpMethod string, @@ -839,13 +930,13 @@ func queryFuncCreator( panic(errors.Errorf("unknown partial response strategy %v", partialResponseStrategy).Error()) } - return func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { for _, i := range rand.Perm(len(queriers)) { promClient := promClients[i] endpoints := thanosrules.RemoveDuplicateQueryEndpoints(logger, duplicatedQuery, queriers[i].Endpoints()) for _, i := range rand.Perm(len(endpoints)) { span, ctx := tracing.StartSpan(ctx, spanID) - v, warns, err := promClient.PromqlQueryInstant(ctx, endpoints[i], q, t, promclient.QueryOptions{ + v, warns, err := promClient.PromqlQueryInstant(ctx, endpoints[i], qs, t, promclient.QueryOptions{ Deduplicate: true, PartialResponseStrategy: partialResponseStrategy, Method: httpMethod, @@ -854,23 +945,53 @@ func queryFuncCreator( span.Finish() if err != nil { - level.Error(logger).Log("err", err, "query", q) + level.Error(logger).Log("err", err, "query", qs) continue } if len(warns) > 0 { ruleEvalWarnings.WithLabelValues(strings.ToLower(partialResponseStrategy.String())).Inc() // TODO(bwplotka): Propagate those to UI, probably requires changing rule manager code ): - level.Warn(logger).Log("warnings", strings.Join(warns, ", "), "query", q) + level.Warn(logger).Log("warnings", strings.Join(warns, ", "), "query", qs) } return v, nil } } + + if grpcEndpointSet != nil { + queryAPIClients := grpcEndpointSet.GetQueryAPIClients() + for _, i := range rand.Perm(len(queryAPIClients)) { + e := 
query.NewRemoteEngine(logger, queryAPIClients[i], query.Opts{}) + q, err := e.NewInstantQuery(ctx, nil, qs, t) + if err != nil { + level.Error(logger).Log("err", err, "query", qs) + continue + } + + result := q.Exec(ctx) + v, err := result.Vector() + if err != nil { + level.Error(logger).Log("err", err, "query", qs) + continue + } + + if len(result.Warnings) > 0 { + ruleEvalWarnings.WithLabelValues(strings.ToLower(partialResponseStrategy.String())).Inc() + warnings := make([]string, 0, len(result.Warnings)) + for _, w := range result.Warnings { + warnings = append(warnings, w.Error()) + } + level.Warn(logger).Log("warnings", strings.Join(warnings, ", "), "query", qs) + } + + return v, nil + } + } return nil, errors.Errorf("no query API server reachable") } } } -func addDiscoveryGroups(g *run.Group, c *httpconfig.Client, interval time.Duration) { +func addDiscoveryGroups(g *run.Group, c *clientconfig.HTTPClient, interval time.Duration) { ctx, cancel := context.WithCancel(context.Background()) g.Add(func() error { c.Discover(ctx) @@ -939,7 +1060,7 @@ func tableLinkForExpression(tmpl string, expr string) (string, error) { escapedExpression := url.QueryEscape(expr) escapedExpr := Expression{Expr: escapedExpression} - t, err := template.New("url").Parse(tmpl) + t, err := texttemplate.New("url").Parse(tmpl) if err != nil { return "", errors.Wrap(err, "failed to parse template") } diff --git a/cmd/thanos/rule_test.go b/cmd/thanos/rule_test.go index 3ba4d65b25..097b66e65b 100644 --- a/cmd/thanos/rule_test.go +++ b/cmd/thanos/rule_test.go @@ -86,6 +86,12 @@ func Test_tableLinkForExpression(t *testing.T) { expectStr: `/graph?g0.expr=up%7Bapp%3D%22foo%22%7D&g0.tab=1`, expectErr: false, }, + { + template: `/graph?g0.expr={{.Expr}}&g0.tab=1`, + expr: `up{app="foo yoo"}`, + expectStr: `/graph?g0.expr=up%7Bapp%3D%22foo+yoo%22%7D&g0.tab=1`, + expectErr: false, + }, { template: `/graph?g0.expr={{.Expression}}&g0.tab=1`, expr: "test_expr", diff --git a/cmd/thanos/sidecar.go 
b/cmd/thanos/sidecar.go index 3b8846d146..74ab3090fa 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -30,11 +30,11 @@ import ( objstoretracing "github.com/thanos-io/objstore/tracing/opentracing" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/exemplars" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" @@ -68,12 +68,12 @@ func registerSidecar(app *extkingpin.App) { if err != nil { return errors.Wrap(err, "getting http client config") } - httpClientConfig, err := httpconfig.NewClientConfigFromYAML(httpConfContentYaml) + httpClientConfig, err := clientconfig.NewHTTPClientConfigFromYAML(httpConfContentYaml) if err != nil { return errors.Wrap(err, "parsing http config YAML") } - httpClient, err := httpconfig.NewHTTPClient(*httpClientConfig, "thanos-sidecar") + httpClient, err := clientconfig.NewHTTPClient(*httpClientConfig, "thanos-sidecar") if err != nil { return errors.Wrap(err, "Improper http client config") } @@ -260,7 +260,7 @@ func runSidecar( }) } { - c := promclient.NewWithTracingClient(logger, httpClient, httpconfig.ThanosUserAgent) + c := promclient.NewWithTracingClient(logger, httpClient, clientconfig.ThanosUserAgent) promStore, err := store.NewPrometheusStore(logger, reg, c, conf.prometheus.url, component.Sidecar, m.Labels, m.Timestamps, m.Version) if err != nil { diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go index 9191b77ce8..7d80687ec3 100644 --- a/cmd/thanos/store.go +++ b/cmd/thanos/store.go @@ -28,6 +28,7 @@ import ( blocksAPI "github.com/thanos-io/thanos/pkg/api/blocks" "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/indexheader" 
"github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/component" hidden "github.com/thanos-io/thanos/pkg/extflag" @@ -89,6 +90,8 @@ type storeConfig struct { lazyIndexReaderEnabled bool lazyIndexReaderIdleTimeout time.Duration lazyExpandedPostingsEnabled bool + + indexHeaderLazyDownloadStrategy string } func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) { @@ -186,6 +189,10 @@ func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) { cmd.Flag("store.enable-lazy-expanded-postings", "If true, Store Gateway will estimate postings size and try to lazily expand postings if it downloads less data than expanding all postings."). Default("false").BoolVar(&sc.lazyExpandedPostingsEnabled) + cmd.Flag("store.index-header-lazy-download-strategy", "Strategy of how to download index headers lazily. Supported values: eager, lazy. If eager, always download index header during initial load. If lazy, download index header during query time."). + Default(string(indexheader.EagerDownloadStrategy)). + EnumVar(&sc.indexHeaderLazyDownloadStrategy, string(indexheader.EagerDownloadStrategy), string(indexheader.LazyDownloadStrategy)) + cmd.Flag("web.disable", "Disable Block Viewer UI.").Default("false").BoolVar(&sc.disableWeb) cmd.Flag("web.external-prefix", "Static prefix for all HTML links and redirect URLs in the bucket web UI interface. Actual endpoints are still served on / or the web.route-prefix. This allows thanos bucket web UI to be served behind a reverse proxy that strips a URL sub-path."). 
@@ -388,6 +395,9 @@ func runStore( return conf.estimatedMaxChunkSize }), store.WithLazyExpandedPostings(conf.lazyExpandedPostingsEnabled), + store.WithIndexHeaderLazyDownloadStrategy( + indexheader.IndexHeaderLazyDownloadStrategy(conf.indexHeaderLazyDownloadStrategy).StrategyToDownloadFunc(), + ), } if conf.debugLogging { diff --git a/docs/components/query-frontend.md b/docs/components/query-frontend.md index a6fde2ba77..e374f19ec1 100644 --- a/docs/components/query-frontend.md +++ b/docs/components/query-frontend.md @@ -252,6 +252,11 @@ Flags: --query-frontend.downstream-url="http://localhost:9090" URL of downstream Prometheus Query compatible API. + --query-frontend.enable-x-functions + Enable experimental x- + functions in query-frontend. + --no-query-frontend.enable-x-functions for + disabling. --query-frontend.forward-header= ... List of headers forwarded by the query-frontend to downstream queriers, default is empty diff --git a/docs/components/query.md b/docs/components/query.md index 2c4f473464..4584363ba3 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -182,7 +182,7 @@ Available options: ### Partial Response Strategy -// TODO(bwplotka): Update. This will change to "strategy" soon as [PartialResponseStrategy enum here](../../pkg/store/storepb/rpc.proto) + | HTTP URL/FORM parameter | Type | Default | Example | |-------------------------|-----------|-----------------------------------------------|----------------------------------------| @@ -260,6 +260,20 @@ Example file SD file in YAML: `--query.active-query-path` is an option which allows the user to specify a directory which will contain a `queries.active` file to track active queries. To enable this feature, the user has to specify a directory other than "", since that is skipped being the default. +## Tenancy + +### Tenant Metrics + +Tenant information is captured in relevant Thanos exported metrics in the Querier, Query Frontend and Store. 
In order to make use of this functionality requests to the Query/Query Frontend component should include the tenant-id in the appropriate HTTP request header as configured with `--query.tenant-header`. The tenant information is passed through components (including Query Frontend), down to the Thanos Store, enabling per-tenant metrics in these components also. If no tenant header is set on requests to the query component, the default tenant as defined by `--query.default-tenant-id` will be used. + +### Tenant Enforcement + +Enforcement of tenancy can be enabled using `--query.enforce-tenancy`. If enabled, queries will only fetch series containing a specific matcher, while evaluating PromQL expressions. The matcher label name is `--query.tenant-label-name` and the matcher value matches the tenant, as sent to the querier in the HTTP header configured with `--query.tenant-header`. This functionality requires that metrics are injected with a tenant label when ingested into Thanos. This can be done for example by enabling tenancy in the Thanos Receive component. + +In case of nested Thanos Query components, it's important to note that tenancy enforcement will only occur in the querier which the initial request is sent to, the layered queriers will not perform any enforcement. + +Further, note that there are no authentication mechanisms in Thanos, so anyone can set an arbitrary tenant in the HTTP header. It is recommended to use a proxy in front of the querier in case an authentication mechanism is needed. The Query UI also includes an option to set an arbitrary tenant, and should therefore not be exposed to end-users if users should not be able to see each other's data. + +## Flags ```$ mdox-exec="thanos query --help" @@ -363,6 +377,14 @@ Flags: --query.default-tenant-id="default-tenant" Default tenant ID to use if tenant header is not present + --query.enable-x-functions + Whether to enable extended rate functions + (xrate, xincrease and xdelta).
Only has effect + when used with Thanos engine. + --query.enforce-tenancy Enforce tenancy on Query APIs. Responses + are returned only if the label value of the + configured tenant-label-name and the value of + the tenant header matches. --query.lookback-delta=QUERY.LOOKBACK-DELTA The maximum lookback duration for retrieving metrics during expression evaluations. @@ -415,6 +437,9 @@ Flags: flag value to be ignored. --query.tenant-header="THANOS-TENANT" HTTP header to determine tenant. + --query.tenant-label-name="tenant_id" + Label name to use when enforcing tenancy (if + --query.enforce-tenancy is enabled). --query.timeout=2m Maximum time to process query by query node. --request.logging-config= Alternative to 'request.logging-config-file' diff --git a/docs/components/rule.md b/docs/components/rule.md index 5e6a1a3d8f..044148c3ba 100644 --- a/docs/components/rule.md +++ b/docs/components/rule.md @@ -168,7 +168,7 @@ The most important metrics to alert on are: Those metrics are important for vanilla Prometheus as well, but even more important when we rely on (sometimes WAN) network. -// TODO(bwplotka): Rereview them after recent changes in metrics. + See [alerts](https://github.com/thanos-io/thanos/blob/e3b0baf7de9dde1887253b1bb19d78ae71a01bf8/examples/alerts/alerts.md#ruler) for more example alerts for ruler. @@ -330,6 +330,11 @@ Flags: from other components. --grpc-grace-period=2m Time to wait after an interrupt received for GRPC Server. + --grpc-query-endpoint= ... + Addresses of Thanos gRPC query API servers + (repeatable). The scheme may be prefixed + with 'dns+' or 'dnssrv+' to detect Thanos API + servers through respective DNS lookups. --grpc-server-max-connection-age=60m The grpc server max connection age. This controls how often to re-establish connections @@ -551,7 +556,9 @@ Supported values for `api_version` are `v1` or `v2`. ### Query API -The `--query.config` and `--query.config-file` flags allow specifying multiple query endpoints. 
Those entries are treated as a single HA group. This means that query failure is claimed only if the Ruler fails to query all instances. +The `--query.config` and `--query.config-file` flags allow specifying multiple query endpoints. Those entries are treated as a single HA group, where HTTP endpoints are given priority over gRPC Query API endpoints. This means that query failure is claimed only if the Ruler fails to query all instances. + +Rules that produce native histograms (experimental feature) are exclusively supported through the gRPC Query API. However, for all other rules, there is no difference in functionality between HTTP and gRPC. The configuration format is the following: @@ -576,4 +583,6 @@ The configuration format is the following: refresh_interval: 0s scheme: http path_prefix: "" + grpc_config: + endpoint_addresses: [] ``` diff --git a/docs/components/store.md b/docs/components/store.md index 3ba59a2d9e..85fd4ce688 100644 --- a/docs/components/store.md +++ b/docs/components/store.md @@ -193,6 +193,12 @@ Flags: DEPRECATED: use store.limits.request-samples. --store.grpc.touched-series-limit=0 DEPRECATED: use store.limits.request-series. + --store.index-header-lazy-download-strategy=eager + Strategy of how to download index headers + lazily. Supported values: eager, lazy. + If eager, always download index header during + initial load. If lazy, download index header + during query time. 
--store.limits.request-samples=0 The maximum samples allowed for a single Series request, The Series call fails if diff --git a/docs/getting-started.md b/docs/getting-started.md index 9e0a7a8ed0..da6b414472 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -88,6 +88,10 @@ See up to date [jsonnet mixins](https://github.com/thanos-io/thanos/tree/main/mi ## Talks +* 2023 + * [Planetscale monitoring: Handling billions of active series with Prometheus and Thanos](https://www.youtube.com/watch?v=Or8r46fSaOg) + * [Taming the Tsunami: low latency ingestion of push-based metrics in Prometheus](https://www.youtube.com/watch?v=W81x1j765hc) + * 2022 * [Story of Correlation: Integrating Thanos Metrics with Observability Signals](https://www.youtube.com/watch?v=rWFb01GW0mQ) * [Running the Observability As a Service For Your Teams With Thanos](https://www.youtube.com/watch?v=I4Mfyfd_4M8) diff --git a/docs/operating/multi-tenancy.md b/docs/operating/multi-tenancy.md index a52741505d..22c26f8396 100644 --- a/docs/operating/multi-tenancy.md +++ b/docs/operating/multi-tenancy.md @@ -2,4 +2,4 @@ Thanos supports multi-tenancy by using external labels. For such use cases, the [Thanos Sidecar](../components/sidecar.md) based approach with layered [Thanos Queriers](../components/query.md) is recommended. -You can also use the [Thanos Receiver](../components/receive.md) however, we don't recommend it to achieve a global view of data of a single-tenant. Also note that, multi-tenancy may also be achievable if ingestion is not user-controlled, as then enforcing of labels, for example using the [prom-label-proxy](https://github.com/openshift/prom-label-proxy) (please thoroughly understand the mechanism if intending to employ this mechanism, as the wrong configuration could leak data). +You can also use the [Thanos Receiver](../components/receive.md) however, we don't recommend it to achieve a global view of data of a single-tenant. 
Also note that, multi-tenancy may also be achievable if ingestion is not user-controlled, as then enforcing of labels, for example using the [prom-label-proxy](https://github.com/prometheus-community/prom-label-proxy) (please thoroughly understand the mechanism if intending to employ this mechanism, as the wrong configuration could leak data). diff --git a/docs/support/welcome.md b/docs/support/welcome.md new file mode 100644 index 0000000000..c4e48c5582 --- /dev/null +++ b/docs/support/welcome.md @@ -0,0 +1,6 @@ +--- +title: Welcome to Support and Training! +author: Thanos Team +--- + +Anyone who has developed a Thanos training program or offers related services can add themselves to this page by opening a pull request against it. diff --git a/go.mod b/go.mod index 824d581cf4..73ae55a2b7 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,9 @@ go 1.21 require ( cloud.google.com/go/storage v1.30.1 // indirect - cloud.google.com/go/trace v1.10.1 + cloud.google.com/go/trace v1.10.4 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.8.3 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 github.com/alicebob/miniredis/v2 v2.22.0 github.com/blang/semver/v4 v4.0.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b @@ -22,9 +22,9 @@ require ( github.com/fatih/structtag v1.2.0 github.com/felixge/fgprof v0.9.2 github.com/fortytw2/leaktest v1.3.0 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.9 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.1 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -40,11 +40,11 @@ require ( github.com/hashicorp/golang-lru v0.6.0 github.com/jpillora/backoff v1.0.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.2 + github.com/klauspost/compress v1.17.4 
github.com/leanovate/gopter v0.2.9 github.com/lightstep/lightstep-tracer-go v0.25.0 github.com/lovoo/gcloud-opentracing v0.3.0 - github.com/miekg/dns v1.1.56 + github.com/miekg/dns v1.1.57 github.com/minio/minio-go/v7 v7.0.61 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 @@ -62,35 +62,35 @@ require ( github.com/prometheus/common v0.45.0 github.com/prometheus/exporter-toolkit v0.10.0 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11 + github.com/prometheus/prometheus v0.48.1-0.20231212213830-d0c2d9c0b9cc github.com/sony/gobreaker v0.5.0 github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20231112185854-37752ee64d98 - github.com/thanos-io/promql-engine v0.0.0-20231013104847-4517c0d5f591 + github.com/thanos-io/promql-engine v0.0.0-20231214130043-41b2cf818e81 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.elastic.co/apm v1.11.0 go.elastic.co/apm/module/apmot v1.11.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/bridge/opentracing v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/bridge/opentracing v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 - go.uber.org/goleak v1.2.1 - golang.org/x/crypto v0.15.0 - golang.org/x/net v0.18.0 + go.uber.org/goleak v1.3.0 + golang.org/x/crypto v0.16.0 + golang.org/x/net v0.19.0 golang.org/x/sync v0.5.0 golang.org/x/text v0.14.0 - golang.org/x/time v0.3.0 - google.golang.org/api v0.147.0 // indirect - google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + golang.org/x/time v0.5.0 + google.golang.org/api v0.153.0 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/grpc v1.59.0 google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -104,12 +104,12 @@ require ( ) require ( - cloud.google.com/go v0.110.8 // indirect - cloud.google.com/go/compute v1.23.0 // indirect - cloud.google.com/go/iam v1.1.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect + cloud.google.com/go v0.110.10 // indirect + cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0 @@ -119,26 +119,35 @@ require ( require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/gomega v1.27.10 + github.com/prometheus-community/prom-label-proxy v0.7.0 go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 go4.org/intern 
v0.0.0-20230525184215-6c62f75575cb - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb ) require ( + github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect + github.com/go-openapi/runtime v0.26.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/s2a-go v0.1.7 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo v1.16.5 // indirect github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 // indirect - go.opentelemetry.io/collector/featuregate v0.77.0 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect - go.opentelemetry.io/collector/semconv v0.88.0 // indirect + go.opentelemetry.io/collector/featuregate v1.0.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0 // indirect + go.opentelemetry.io/collector/semconv v0.90.1 // indirect go.opentelemetry.io/contrib/propagators/ot v1.13.0 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + k8s.io/apimachinery v0.28.4 // indirect + k8s.io/client-go v0.28.4 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect ) require ( @@ -150,7 +159,7 @@ require ( github.com/aliyun/aliyun-oss-go-sdk 
v2.2.2+incompatible // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.45.25 // indirect + github.com/aws/aws-sdk-go v1.48.14 // indirect github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect @@ -172,10 +181,10 @@ require ( github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/elastic/go-sysinfo v1.8.1 // indirect github.com/elastic/go-windows v1.0.1 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect @@ -193,9 +202,9 @@ require ( github.com/gogo/googleapis v1.4.0 // indirect github.com/google/go-cmp v0.6.0 github.com/google/go-querystring v1.1.0 // indirect - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect - github.com/google/uuid v1.3.1 - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 // indirect + github.com/google/uuid v1.4.0 + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -218,7 +227,7 @@ require ( github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + 
github.com/prometheus/procfs v0.12.0 // indirect github.com/redis/rueidis v1.0.14-go1.18 github.com/rivo/uniseg v0.2.0 // indirect github.com/rs/xid v1.5.0 // indirect @@ -236,18 +245,18 @@ require ( github.com/yusufpapurcu/wmi v1.2.2 // indirect go.elastic.co/apm/module/apmhttp v1.11.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/propagators/aws v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/tools v0.15.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/tools v0.16.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 304bb3365c..9f8d31831b 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,8 @@ cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= 
+cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -41,17 +41,17 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/monitoring v1.16.0 h1:rlndy4K8yknMY9JuGe2aK4SbCh21FXoCdX7SAGHmRgI= -cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam 
v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/monitoring v1.16.3 h1:mf2SN9qSoBtIgiMA4R/y4VADPWZA7VCNJA079qLaZQ8= +cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -64,15 +64,15 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= -cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/trace v1.10.4 h1:2qOAuAzNezwW3QN+t41BtkDJOG42HywL73q8x/f6fnM= +cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= @@ -84,6 +84,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.8.3 h1:i84ZOPT35YCJROyuf97VP/VEdYhQce/8NTLOWq5tqJw= @@ -115,8 +116,9 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.22.0 h1:lIHHiSkEyS1MkKHCHzN+0mWrA4YdbGdimE5iZ2sHSzo= @@ -141,8 +143,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= -github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.48.14 h1:nVLrp+F84SG+xGiFMfe1TE6ZV6smF+42tuuNgYGV30s= +github.com/aws/aws-sdk-go v1.48.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= @@ -170,6 +172,8 @@ github.com/baidubce/bce-sdk-go 
v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -214,8 +218,8 @@ github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52 h1:soTzgUC/F+qtsMbh/IWr3uGwPaXXYEUsTT3zYiXP0yM= -github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52/go.mod h1:8LOFLrqqVfNalbgjKZYdh6Bv/VXLdOV799aJwZJJGOs= +github.com/cortexproject/promqlsmith v0.0.0-20231206201345-a7d143cf5966 h1:KvukHFhAxCBTsWA4CHKVTIZaQ+wHX71ZpqUz2HSOhLw= +github.com/cortexproject/promqlsmith v0.0.0-20231206201345-a7d143cf5966/go.mod h1:89R0fjNCjntWZFJYjZ906Sh0w9VNIkCHA/GwNQ6Zqk0= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod 
h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -228,14 +232,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= -github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= +github.com/digitalocean/godo v1.106.0 h1:m5iErwl3xHovGFlawd50n54ntgXHt1BLsvU6BXsVxEU= +github.com/digitalocean/godo v1.106.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= -github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -279,16 +283,16 @@ github.com/fatih/structtag v1.2.0/go.mod 
h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4 github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc= github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -309,8 +313,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod 
h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= @@ -336,6 +340,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= @@ -343,8 +349,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= 
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -501,19 +507,19 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -528,8 +534,8 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod 
h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -558,8 +564,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= +github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= @@ -588,6 +594,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -614,8 +622,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= -github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.10 h1:3815Q2Hw/wc4cJ8wD7bwfsmDsdfIEp80B7BQMj0YP2w= +github.com/ionos-cloud/sdk-go/v6 v6.1.10/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -633,6 +641,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -650,8 +659,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -681,8 +690,8 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.25.0 h1:sGVnz8h3jTQuHKMbUe2949nXm3Sg09N1UcR3VoQNN5E= github.com/lightstep/lightstep-tracer-go v0.25.0/go.mod h1:G1ZAEaqTHFPWpWunnbUn1ADEY/Jvzz7jIOaXwAfD6A8= -github.com/linode/linodego v1.24.0 h1:zO+bMdTE6wPccqP7QIkbxAfACX7DjSX6DW9JE/qOKDQ= -github.com/linode/linodego v1.24.0/go.mod h1:cq/ty5BCEQnsO6OjMqD7Q03KCCyB8CNM5E3MNg0LV6M= +github.com/linode/linodego v1.25.0 h1:zYMz0lTasD503jBu3tSRhzEmXHQN1zptCw5o71ibyyU= 
+github.com/linode/linodego v1.25.0/go.mod h1:BMZI0pMM/YGjBis7pIXDPbcgYfCZLH0/UvzqtsGtG1c= github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -708,10 +717,12 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI= @@ -835,10 +846,13 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prashantv/gostub v1.1.0/go.mod 
h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= +github.com/prometheus-community/prom-label-proxy v0.7.0 h1:1iNHXF7V8z2iOCinEyxKDUHu2jppPAAd6PmBCi3naok= +github.com/prometheus-community/prom-label-proxy v0.7.0/go.mod h1:wR9C/Mwp5aBbiqM6gQ+FZdFRwL8pCzzhsje8lTAx/aA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -856,6 +870,7 @@ github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -878,10 +893,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11 h1:3wogiAxGft1+Syu99dswxhbWpR4IjLFKQgKxU84wrag= -github.com/prometheus/prometheus v0.48.1-0.20231201222638-e4ec263bcc11/go.mod h1:hCcxMXhfC04Ua9hQi2j43+EGMQhrRNvH8x0LteAnF1I= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.48.1-0.20231212213830-d0c2d9c0b9cc h1:0szqUyAxmyb56oz4LFYYb2G3oBK/Kx9ijH6xpSwK8Ak= +github.com/prometheus/prometheus v0.48.1-0.20231212213830-d0c2d9c0b9cc/go.mod h1:Mion2/PKFmhgQqLN58WTe/1lBjL0Kc513mkLmX8FyOA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= @@ -912,7 +927,6 @@ github.com/shirou/gopsutil/v3 v3.21.2/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0 github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 
-github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= github.com/simonpasquier/klog-gokit/v3 v3.0.0 h1:J0QrVhAULISHWN05PeXX/xMqJBjnpl2fAuO8uHdQGsA= github.com/simonpasquier/klog-gokit/v3 v3.0.0/go.mod h1:+WRhGy707Lp2Q4r727m9Oc7FxazOHgW76FIyCr23nus= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -963,8 +977,8 @@ github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= github.com/thanos-io/objstore v0.0.0-20231112185854-37752ee64d98 h1:gx2MTto1UQRumGoJzY3aFPQ31Ov3nOV7NaD7j6q288k= github.com/thanos-io/objstore v0.0.0-20231112185854-37752ee64d98/go.mod h1:JauBAcJ61tRSv9widgISVmA6akQXDeUMXBrVmWW4xog= -github.com/thanos-io/promql-engine v0.0.0-20231013104847-4517c0d5f591 h1:6bZbFM+Mvy2kL8BeL8TJ5+5pV3sUR2PSLaZyw911rtQ= -github.com/thanos-io/promql-engine v0.0.0-20231013104847-4517c0d5f591/go.mod h1:vfXJv1JXNdLfHnjsHsLLJl5tyI7KblF76Wo5lZ9YC4Q= +github.com/thanos-io/promql-engine v0.0.0-20231214130043-41b2cf818e81 h1:EdFfVjUhwfj6JRjuZf+EchsxBD+60T6X0rPbzhraJj4= +github.com/thanos-io/promql-engine v0.0.0-20231214130043-41b2cf818e81/go.mod h1:uzl2mg4OyB9A54Hhrk/wViZiZoHT2o2qq+NGQkEmfzs= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1023,8 +1037,8 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod 
h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= -go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1036,14 +1050,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU= -go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= -go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= -go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= +go.opentelemetry.io/collector/featuregate v1.0.0/go.mod 
h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/semconv v0.90.1 h1:2fkQZbefQBbIcNb9Rk1mRcWlFZgQOk7CpST1e1BK8eg= +go.opentelemetry.io/collector/semconv v0.90.1/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= go.opentelemetry.io/contrib/propagators/aws v1.13.0 h1:9qOAQhTeJGiaYNfCCnRmL12XZGIaxclqS5yfkSXpn8o= @@ -1056,24 +1070,24 @@ go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYoh go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0 h1:E+RlfFhGZ5Tk0wO1oOJYC0Il4Q7SjE8ZMl8x/VTK9Pk= go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0/go.mod h1:cuBMmL+iGJ4UpZi6dykQlIUxqKSMkp5eu1C1UjUJYFI= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/bridge/opentracing v1.19.0 h1:HCvsUi6uuhat/nAuxCl41A+OPxXXPxMNTRxKZx7hTW4= -go.opentelemetry.io/otel/bridge/opentracing v1.19.0/go.mod h1:n46h+7L/lcSuHhpqJQiUdb4eux19NNxTuWJ/ZMnIQMg= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= 
+go.opentelemetry.io/otel/bridge/opentracing v1.21.0 h1:7AfuSFhyvBmt/0YskcdxDyTdHPjQfrHcZQo6Zu5srF4= +go.opentelemetry.io/otel/bridge/opentracing v1.21.0/go.mod h1:giUOMajCV30LvlPHnzRDNBvDV3/NmrGVrqCp/1suDok= go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA= go.opentelemetry.io/otel/exporters/jaeger v1.16.0/go.mod h1:grYbBo/5afWlPpdPZYhyn78Bk04hnvxn2+hvxQhKIQM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 
h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= @@ -1085,8 +1099,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -1111,8 +1125,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1123,8 +1137,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp 
v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -1208,9 +1222,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1232,8 +1245,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= 
-golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1343,17 +1356,14 @@ golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1364,7 +1374,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -1372,8 +1381,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time 
v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1439,8 +1448,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1492,8 +1501,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= -google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= 
+google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1584,12 +1593,12 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod 
h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1655,12 +1664,12 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 
h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= diff --git a/internal/cortex/chunk/cache/memcached_client.go b/internal/cortex/chunk/cache/memcached_client.go index a2f7b6b88b..9455f76400 100644 --- a/internal/cortex/chunk/cache/memcached_client.go +++ b/internal/cortex/chunk/cache/memcached_client.go @@ -19,7 +19,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/sony/gobreaker" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/discovery/dns" + memcacheDiscovery "github.com/thanos-io/thanos/pkg/discovery/memcache" + "github.com/thanos-io/thanos/pkg/extprom" ) // MemcachedClient interface exists for mocking memcacheClient. @@ -45,7 +48,7 @@ type memcachedClient struct { service string addresses []string - provider *dns.Provider + provider clientconfig.AddressProvider cbs map[ /*address*/ string]*gobreaker.CircuitBreaker cbFailures uint @@ -68,6 +71,7 @@ type MemcachedClientConfig struct { Host string `yaml:"host"` Service string `yaml:"service"` Addresses string `yaml:"addresses"` // EXPERIMENTAL. 
+ AutoDiscovery bool `yaml:"auto_discovery"` Timeout time.Duration `yaml:"timeout"` MaxIdleConns int `yaml:"max_idle_conns"` MaxItemSize int `yaml:"max_item_size"` @@ -107,9 +111,19 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg client.Timeout = cfg.Timeout client.MaxIdleConns = cfg.MaxIdleConns - dnsProviderRegisterer := prometheus.WrapRegistererWithPrefix("cortex_", prometheus.WrapRegistererWith(prometheus.Labels{ - "name": name, - }, r)) + var addressProvider clientconfig.AddressProvider + if cfg.AutoDiscovery { + addressProvider = memcacheDiscovery.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("cortex_", r), + cfg.Timeout, + ) + } else { + dnsProviderRegisterer := prometheus.WrapRegistererWithPrefix("cortex_", prometheus.WrapRegistererWith(prometheus.Labels{ + "name": name, + }, r)) + addressProvider = dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType) + } newClient := &memcachedClient{ name: name, @@ -118,7 +132,7 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg hostname: cfg.Host, service: cfg.Service, logger: logger, - provider: dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType), + provider: addressProvider, cbs: make(map[string]*gobreaker.CircuitBreaker), cbFailures: cfg.CBFailures, cbInterval: cfg.CBInterval, diff --git a/internal/cortex/querier/queryrange/results_cache.go b/internal/cortex/querier/queryrange/results_cache.go index d29f5c3305..de0f7e8149 100644 --- a/internal/cortex/querier/queryrange/results_cache.go +++ b/internal/cortex/querier/queryrange/results_cache.go @@ -282,6 +282,9 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Re if !s.isAtModifierCachable(req, maxCacheTime) { return false } + if !s.isOffsetCachable(req) { + return false + } if s.cacheGenNumberLoader == nil { return true @@ -305,11 +308,10 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Re 
return true } -var errAtModifierAfterEnd = errors.New("at modifier after end") - // isAtModifierCachable returns true if the @ modifier result // is safe to cache. func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { + var errAtModifierAfterEnd = errors.New("at modifier after end") // There are 2 cases when @ modifier is not safe to cache: // 1. When @ modifier points to time beyond the maxCacheTime. // 2. If the @ modifier time is > the query range end while being @@ -357,6 +359,46 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { return atModCachable } +// isOffsetCachable returns true if the offset is positive, result is safe to cache. +// and false when offset is negative, result is not cached. +func (s resultsCache) isOffsetCachable(r Request) bool { + var errNegativeOffset = errors.New("negative offset") + query := r.GetQuery() + if !strings.Contains(query, "offset") { + return true + } + expr, err := parser.ParseExpr(query) + if err != nil { + level.Warn(s.logger).Log("msg", "failed to parse query, considering offset as not cachable", "query", query, "err", err) + return false + } + + offsetCachable := true + parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { + switch e := n.(type) { + case *parser.VectorSelector: + if e.OriginalOffset < 0 { + offsetCachable = false + return errNegativeOffset + } + case *parser.MatrixSelector: + offset := e.VectorSelector.(*parser.VectorSelector).OriginalOffset + if offset < 0 { + offsetCachable = false + return errNegativeOffset + } + case *parser.SubqueryExpr: + if e.OriginalOffset < 0 { + offsetCachable = false + return errNegativeOffset + } + } + return nil + }) + + return offsetCachable +} + func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) { for _, hv := range r.GetHeaders() { if hv.GetName() != headerName { diff --git a/internal/cortex/querier/queryrange/results_cache_test.go 
b/internal/cortex/querier/queryrange/results_cache_test.go index cb7d04fc95..cb12d5cb23 100644 --- a/internal/cortex/querier/queryrange/results_cache_test.go +++ b/internal/cortex/querier/queryrange/results_cache_test.go @@ -532,6 +532,45 @@ func TestShouldCache(t *testing.T) { input: Response(&PrometheusResponse{}), expected: false, }, + // offset on vector selectors. + { + name: "positive offset on vector selector", + request: &PrometheusRequest{Query: "metric offset 10ms", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: true, + }, + { + name: "negative offset on vector selector", + request: &PrometheusRequest{Query: "metric offset -10ms", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: false, + }, + // offset on matrix selectors. + { + name: "positive offset on matrix selector", + request: &PrometheusRequest{Query: "rate(metric[5m] offset 10ms)", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: true, + }, + { + name: "negative offset on matrix selector", + request: &PrometheusRequest{Query: "rate(metric[5m] offset -10ms)", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: false, + }, + // offset on subqueries. 
+ { + name: "positive offset on subqueries", + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset 10ms)", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: true, + }, + { + name: "negative offset on subqueries", + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset -10ms)", End: 125000}, + input: Response(&PrometheusResponse{}), + expected: false, + }, } { { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/alert/config.go b/pkg/alert/config.go index 67613870d9..f509ccc993 100644 --- a/pkg/alert/config.go +++ b/pkg/alert/config.go @@ -15,8 +15,8 @@ import ( "github.com/prometheus/prometheus/model/relabel" "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/discovery/dns" - "github.com/thanos-io/thanos/pkg/httpconfig" ) type AlertingConfig struct { @@ -25,10 +25,10 @@ type AlertingConfig struct { // AlertmanagerConfig represents a client to a cluster of Alertmanager endpoints. type AlertmanagerConfig struct { - HTTPClientConfig httpconfig.ClientConfig `yaml:"http_config"` - EndpointsConfig httpconfig.EndpointsConfig `yaml:",inline"` - Timeout model.Duration `yaml:"timeout"` - APIVersion APIVersion `yaml:"api_version"` + HTTPClientConfig clientconfig.HTTPClientConfig `yaml:"http_config"` + EndpointsConfig clientconfig.HTTPEndpointsConfig `yaml:",inline"` + Timeout model.Duration `yaml:"timeout"` + APIVersion APIVersion `yaml:"api_version"` } // APIVersion represents the API version of the Alertmanager endpoint. 
@@ -61,10 +61,10 @@ func (v *APIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { func DefaultAlertmanagerConfig() AlertmanagerConfig { return AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ Scheme: "http", StaticAddresses: []string{}, - FileSDConfigs: []httpconfig.FileSDConfig{}, + FileSDConfigs: []clientconfig.HTTPFileSDConfig{}, }, Timeout: model.Duration(time.Second * 10), APIVersion: APIv1, @@ -119,7 +119,7 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage break } } - var basicAuth httpconfig.BasicAuth + var basicAuth clientconfig.BasicAuth if parsed.User != nil && parsed.User.String() != "" { basicAuth.Username = parsed.User.Username() pw, _ := parsed.User.Password() @@ -127,10 +127,10 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage } return AlertmanagerConfig{ - HTTPClientConfig: httpconfig.ClientConfig{ + HTTPClientConfig: clientconfig.HTTPClientConfig{ BasicAuth: basicAuth, }, - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ PathPrefix: parsed.Path, Scheme: scheme, StaticAddresses: []string{host}, diff --git a/pkg/alert/config_test.go b/pkg/alert/config_test.go index 73abb0d9b9..a0e259756c 100644 --- a/pkg/alert/config_test.go +++ b/pkg/alert/config_test.go @@ -10,7 +10,7 @@ import ( "gopkg.in/yaml.v2" "github.com/efficientgo/core/testutil" - "github.com/thanos-io/thanos/pkg/httpconfig" + "github.com/thanos-io/thanos/pkg/clientconfig" ) func TestUnmarshalAPIVersion(t *testing.T) { @@ -54,7 +54,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "http://localhost:9093", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, @@ -64,7 +64,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { 
{ address: "https://am.example.com", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"am.example.com"}, Scheme: "https", }, @@ -74,7 +74,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dns+http://localhost:9093", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"dns+localhost:9093"}, Scheme: "http", }, @@ -84,7 +84,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dnssrv+http://localhost", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"dnssrv+localhost"}, Scheme: "http", }, @@ -94,7 +94,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "ssh+http://localhost", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"localhost"}, Scheme: "ssh+http", }, @@ -104,7 +104,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dns+https://localhost/path/prefix/", expected: AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: []string{"dns+localhost:9093"}, Scheme: "https", PathPrefix: "/path/prefix/", @@ -115,13 +115,13 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "http://user:pass@localhost:9093", expected: AlertmanagerConfig{ - HTTPClientConfig: httpconfig.ClientConfig{ - BasicAuth: httpconfig.BasicAuth{ + HTTPClientConfig: clientconfig.HTTPClientConfig{ + BasicAuth: clientconfig.BasicAuth{ Username: "user", Password: "pass", }, }, - EndpointsConfig: httpconfig.EndpointsConfig{ + EndpointsConfig: clientconfig.HTTPEndpointsConfig{ StaticAddresses: 
[]string{"localhost:9093"}, Scheme: "http", }, diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index 9f654b549b..d6767d9a94 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -99,6 +99,7 @@ type QueryEngineFactory struct { createThanosEngine sync.Once thanosEngine v1.QueryEngine + enableXFunctions bool } func (f *QueryEngineFactory) GetPrometheusEngine() v1.QueryEngine { @@ -118,7 +119,7 @@ func (f *QueryEngineFactory) GetThanosEngine() v1.QueryEngine { return } if f.remoteEngineEndpoints == nil { - f.thanosEngine = engine.New(engine.Opts{EngineOpts: f.engineOpts, Engine: f.GetPrometheusEngine(), EnableAnalysis: true}) + f.thanosEngine = engine.New(engine.Opts{EngineOpts: f.engineOpts, Engine: f.GetPrometheusEngine(), EnableAnalysis: true, EnableXFunctions: f.enableXFunctions}) } else { f.thanosEngine = engine.NewDistributedEngine(engine.Opts{EngineOpts: f.engineOpts, Engine: f.GetPrometheusEngine(), EnableAnalysis: true}, f.remoteEngineEndpoints) } @@ -127,13 +128,11 @@ func (f *QueryEngineFactory) GetThanosEngine() v1.QueryEngine { return f.thanosEngine } -func NewQueryEngineFactory( - engineOpts promql.EngineOpts, - remoteEngineEndpoints promqlapi.RemoteEndpoints, -) *QueryEngineFactory { +func NewQueryEngineFactory(engineOpts promql.EngineOpts, remoteEngineEndpoints promqlapi.RemoteEndpoints, enableExtendedFunctions bool) *QueryEngineFactory { return &QueryEngineFactory{ engineOpts: engineOpts, remoteEngineEndpoints: remoteEngineEndpoints, + enableXFunctions: enableExtendedFunctions, } } @@ -175,6 +174,8 @@ type QueryAPI struct { tenantHeader string defaultTenant string tenantCertField string + enforceTenancy bool + tenantLabel string } // NewQueryAPI returns an initialized QueryAPI type. 
@@ -208,6 +209,8 @@ func NewQueryAPI( tenantHeader string, defaultTenant string, tenantCertField string, + enforceTenancy bool, + tenantLabel string, ) *QueryAPI { if statsAggregatorFactory == nil { statsAggregatorFactory = &store.NoopSeriesStatsAggregatorFactory{} @@ -241,6 +244,8 @@ func NewQueryAPI( tenantHeader: tenantHeader, defaultTenant: defaultTenant, tenantCertField: tenantCertField, + enforceTenancy: enforceTenancy, + tenantLabel: tenantLabel, queryRangeHist: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "thanos_query_range_requested_timespan_duration_seconds", @@ -645,12 +650,10 @@ func (qapi *QueryAPI) query(r *http.Request) (interface{}, []error, *api.ApiErro lookbackDelta = lookbackDeltaFromReq } - tenant, err := tenancy.GetTenantFromHTTP(r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField) + queryStr, tenant, ctx, err := tenancy.RewritePromQL(ctx, r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField, qapi.enforceTenancy, qapi.tenantLabel, r.FormValue("query")) if err != nil { - apiErr = &api.ApiError{Typ: api.ErrorBadData, Err: err} - return nil, nil, apiErr, func() {} + return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} } - ctx = context.WithValue(ctx, tenancy.TenantKey, tenant) // We are starting promQL tracing span here, because we have no control over promQL code. 
span, ctx := tracing.StartSpan(ctx, "promql_instant_query") @@ -671,7 +674,7 @@ func (qapi *QueryAPI) query(r *http.Request) (interface{}, []error, *api.ApiErro query.NewAggregateStatsReporter(&seriesStats), ), promql.NewPrometheusQueryOpts(false, lookbackDelta), - r.FormValue("query"), + queryStr, ts, ) @@ -944,12 +947,10 @@ func (qapi *QueryAPI) queryRange(r *http.Request) (interface{}, []error, *api.Ap lookbackDelta = lookbackDeltaFromReq } - tenant, err := tenancy.GetTenantFromHTTP(r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField) + queryStr, tenant, ctx, err := tenancy.RewritePromQL(ctx, r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField, qapi.enforceTenancy, qapi.tenantLabel, r.FormValue("query")) if err != nil { - apiErr = &api.ApiError{Typ: api.ErrorBadData, Err: err} - return nil, nil, apiErr, func() {} + return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} } - ctx = context.WithValue(ctx, tenancy.TenantKey, tenant) // Record the query range requested. 
qapi.queryRangeHist.Observe(end.Sub(start).Seconds()) @@ -973,7 +974,7 @@ func (qapi *QueryAPI) queryRange(r *http.Request) (interface{}, []error, *api.Ap query.NewAggregateStatsReporter(&seriesStats), ), promql.NewPrometheusQueryOpts(false, lookbackDelta), - r.FormValue("query"), + queryStr, start, end, step, @@ -1049,21 +1050,11 @@ func (qapi *QueryAPI) labelValues(r *http.Request) (interface{}, []error, *api.A return nil, nil, apiErr, func() {} } - var matcherSets [][]*labels.Matcher - for _, s := range r.Form[MatcherParam] { - matchers, err := parser.ParseMetricSelector(s) - if err != nil { - return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} - } - matcherSets = append(matcherSets, matchers) - } - - tenant, err := tenancy.GetTenantFromHTTP(r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField) + matcherSets, ctx, err := tenancy.RewriteLabelMatchers(ctx, r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField, qapi.enforceTenancy, qapi.tenantLabel, r.Form[MatcherParam]) if err != nil { apiErr = &api.ApiError{Typ: api.ErrorBadData, Err: err} return nil, nil, apiErr, func() {} } - ctx = context.WithValue(ctx, tenancy.TenantKey, tenant) q, err := qapi.queryableCreate( true, @@ -1132,13 +1123,10 @@ func (qapi *QueryAPI) series(r *http.Request) (interface{}, []error, *api.ApiErr return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} } - var matcherSets [][]*labels.Matcher - for _, s := range r.Form[MatcherParam] { - matchers, err := parser.ParseMetricSelector(s) - if err != nil { - return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} - } - matcherSets = append(matcherSets, matchers) + matcherSets, ctx, err := tenancy.RewriteLabelMatchers(r.Context(), r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField, qapi.enforceTenancy, qapi.tenantLabel, r.Form[MatcherParam]) + if err != nil { + apiErr := &api.ApiError{Typ: api.ErrorBadData, Err: err} + return nil, nil, apiErr, func() {} } 
enableDedup, apiErr := qapi.parseEnableDedupParam(r) @@ -1161,13 +1149,6 @@ func (qapi *QueryAPI) series(r *http.Request) (interface{}, []error, *api.ApiErr return nil, nil, apiErr, func() {} } - tenant, err := tenancy.GetTenantFromHTTP(r, qapi.tenantHeader, qapi.defaultTenant, "") - if err != nil { - apiErr = &api.ApiError{Typ: api.ErrorBadData, Err: err} - return nil, nil, apiErr, func() {} - } - ctx := context.WithValue(r.Context(), tenancy.TenantKey, tenant) - q, err := qapi.queryableCreate( enableDedup, replicaLabels, @@ -1219,21 +1200,11 @@ func (qapi *QueryAPI) labelNames(r *http.Request) (interface{}, []error, *api.Ap return nil, nil, apiErr, func() {} } - var matcherSets [][]*labels.Matcher - for _, s := range r.Form[MatcherParam] { - matchers, err := parser.ParseMetricSelector(s) - if err != nil { - return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {} - } - matcherSets = append(matcherSets, matchers) - } - - tenant, err := tenancy.GetTenantFromHTTP(r, qapi.tenantHeader, qapi.defaultTenant, "") + matcherSets, ctx, err := tenancy.RewriteLabelMatchers(r.Context(), r, qapi.tenantHeader, qapi.defaultTenant, qapi.tenantCertField, qapi.enforceTenancy, qapi.tenantLabel, r.Form[MatcherParam]) if err != nil { - apiErr = &api.ApiError{Typ: api.ErrorBadData, Err: err} + apiErr := &api.ApiError{Typ: api.ErrorBadData, Err: err} return nil, nil, apiErr, func() {} } - ctx := context.WithValue(r.Context(), tenancy.TenantKey, tenant) q, err := qapi.queryableCreate( true, diff --git a/pkg/api/query/v1_test.go b/pkg/api/query/v1_test.go index 37195fb6bb..ef119bfa19 100644 --- a/pkg/api/query/v1_test.go +++ b/pkg/api/query/v1_test.go @@ -190,7 +190,7 @@ func TestQueryEndpoints(t *testing.T) { Reg: nil, MaxSamples: 10000, Timeout: timeout, - }, nil) + }, nil, false) api := &QueryAPI{ baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, @@ -643,7 +643,7 @@ func TestQueryExplainEndpoints(t *testing.T) { Reg: nil, MaxSamples: 10000, Timeout: 
timeout, - }, nil) + }, nil, false) api := &QueryAPI{ baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, @@ -707,7 +707,7 @@ func TestQueryAnalyzeEndpoints(t *testing.T) { Reg: nil, MaxSamples: 10000, Timeout: timeout, - }, nil) + }, nil, false) api := &QueryAPI{ baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, @@ -881,7 +881,7 @@ func TestMetadataEndpoints(t *testing.T) { Reg: nil, MaxSamples: 10000, Timeout: timeout, - }, nil) + }, nil, false) api := &QueryAPI{ baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, diff --git a/pkg/block/indexheader/header_test.go b/pkg/block/indexheader/header_test.go index 56dabc33f7..4130157a96 100644 --- a/pkg/block/indexheader/header_test.go +++ b/pkg/block/indexheader/header_test.go @@ -206,7 +206,7 @@ func TestReaders(t *testing.T) { _, err := WriteBinary(ctx, bkt, id, fn) testutil.Ok(t, err) - br, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), nil, tmpDir, id, 3, NewLazyBinaryReaderMetrics(nil), NewBinaryReaderMetrics(nil), nil) + br, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), nil, tmpDir, id, 3, NewLazyBinaryReaderMetrics(nil), NewBinaryReaderMetrics(nil), nil, false) testutil.Ok(t, err) defer func() { testutil.Ok(t, br.Close()) }() diff --git a/pkg/block/indexheader/lazy_binary_reader.go b/pkg/block/indexheader/lazy_binary_reader.go index d7e589c724..2b36bf8025 100644 --- a/pkg/block/indexheader/lazy_binary_reader.go +++ b/pkg/block/indexheader/lazy_binary_reader.go @@ -83,6 +83,9 @@ type LazyBinaryReader struct { // Keep track of the last time it was used. usedAt *atomic.Int64 + + // If true, index header will be downloaded at query time rather than initialization time. + lazyDownload bool } // NewLazyBinaryReader makes a new LazyBinaryReader. 
If the index-header does not exist @@ -99,8 +102,9 @@ func NewLazyBinaryReader( metrics *LazyBinaryReaderMetrics, binaryReaderMetrics *BinaryReaderMetrics, onClosed func(*LazyBinaryReader), + lazyDownload bool, ) (*LazyBinaryReader, error) { - if dir != "" { + if dir != "" && !lazyDownload { indexHeaderFile := filepath.Join(dir, id.String(), block.IndexHeaderFilename) // If the index-header doesn't exist we should download it. if _, err := os.Stat(indexHeaderFile); err != nil { @@ -131,6 +135,7 @@ func NewLazyBinaryReader( binaryReaderMetrics: binaryReaderMetrics, usedAt: atomic.NewInt64(time.Now().UnixNano()), onClosed: onClosed, + lazyDownload: lazyDownload, }, nil } diff --git a/pkg/block/indexheader/lazy_binary_reader_test.go b/pkg/block/indexheader/lazy_binary_reader_test.go index 150f2d649b..d740da99ab 100644 --- a/pkg/block/indexheader/lazy_binary_reader_test.go +++ b/pkg/block/indexheader/lazy_binary_reader_test.go @@ -5,6 +5,7 @@ package indexheader import ( "context" + "fmt" "os" "path/filepath" "sync" @@ -31,11 +32,11 @@ func TestNewLazyBinaryReader_ShouldFailIfUnableToBuildIndexHeader(t *testing.T) bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) testutil.Ok(t, err) defer func() { testutil.Ok(t, bkt.Close()) }() - _, err = NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, ulid.MustNew(0, nil), 3, NewLazyBinaryReaderMetrics(nil), NewBinaryReaderMetrics(nil), nil) + _, err = NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, ulid.MustNew(0, nil), 3, NewLazyBinaryReaderMetrics(nil), NewBinaryReaderMetrics(nil), nil, false) testutil.NotOk(t, err) } -func TestNewLazyBinaryReader_ShouldBuildIndexHeaderFromBucket(t *testing.T) { +func TestNewLazyBinaryReader_ShouldNotFailIfUnableToBuildIndexHeaderWhenLazyDownload(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() @@ -43,36 +44,61 @@ func TestNewLazyBinaryReader_ShouldBuildIndexHeaderFromBucket(t *testing.T) { bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, 
"bkt")) testutil.Ok(t, err) defer func() { testutil.Ok(t, bkt.Close()) }() - - // Create block. - blockID, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ - {{Name: "a", Value: "1"}}, - {{Name: "a", Value: "2"}}, - }, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "1"}}, 124, metadata.NoneFunc) + _, err = NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, ulid.MustNew(0, nil), 3, NewLazyBinaryReaderMetrics(nil), NewBinaryReaderMetrics(nil), nil, true) testutil.Ok(t, err) - testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) +} - m := NewLazyBinaryReaderMetrics(nil) - bm := NewBinaryReaderMetrics(nil) - r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil) - testutil.Ok(t, err) - testutil.Assert(t, r.reader == nil) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) +func TestNewLazyBinaryReader_ShouldBuildIndexHeaderFromBucket(t *testing.T) { + ctx := context.Background() - // Should lazy load the index upon first usage. - v, err := r.IndexVersion() - testutil.Ok(t, err) - testutil.Equals(t, 2, v) - testutil.Assert(t, r.reader != nil) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + tmpDir := t.TempDir() - labelNames, err := r.LabelNames() + bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt")) testutil.Ok(t, err) - testutil.Equals(t, []string{"a"}, labelNames) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + defer func() { testutil.Ok(t, bkt.Close()) }() + + for _, lazyDownload := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { + // Create block. 
+ blockID, err := e2eutil.CreateBlock(ctx, tmpDir, []labels.Labels{ + {{Name: "a", Value: "1"}}, + {{Name: "a", Value: "2"}}, + }, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "1"}}, 124, metadata.NoneFunc) + testutil.Ok(t, err) + testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + + m := NewLazyBinaryReaderMetrics(nil) + bm := NewBinaryReaderMetrics(nil) + r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil, lazyDownload) + testutil.Ok(t, err) + testutil.Assert(t, r.reader == nil) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + + _, err = os.Stat(filepath.Join(r.dir, blockID.String(), block.IndexHeaderFilename)) + // Index file shouldn't exist. + if lazyDownload { + testutil.Equals(t, true, os.IsNotExist(err)) + } + // Should lazy load the index upon first usage. + v, err := r.IndexVersion() + testutil.Ok(t, err) + if lazyDownload { + _, err = os.Stat(filepath.Join(r.dir, blockID.String(), block.IndexHeaderFilename)) + testutil.Ok(t, err) + } + testutil.Equals(t, 2, v) + testutil.Assert(t, r.reader != nil) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + + labelNames, err := r.LabelNames() + testutil.Ok(t, err) + testutil.Equals(t, []string{"a"}, labelNames) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + }) + } } func TestNewLazyBinaryReader_ShouldRebuildCorruptedIndexHeader(t *testing.T) { @@ -96,22 +122,26 @@ func TestNewLazyBinaryReader_ShouldRebuildCorruptedIndexHeader(t *testing.T) { headerFilename := filepath.Join(tmpDir, blockID.String(), block.IndexHeaderFilename) testutil.Ok(t, os.WriteFile(headerFilename, []byte("xxx"), os.ModePerm)) - m := 
NewLazyBinaryReaderMetrics(nil) - bm := NewBinaryReaderMetrics(nil) - r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil) - testutil.Ok(t, err) - testutil.Assert(t, r.reader == nil) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) - - // Ensure it can read data. - labelNames, err := r.LabelNames() - testutil.Ok(t, err) - testutil.Equals(t, []string{"a"}, labelNames) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + for _, lazyDownload := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { + m := NewLazyBinaryReaderMetrics(nil) + bm := NewBinaryReaderMetrics(nil) + r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil, lazyDownload) + testutil.Ok(t, err) + testutil.Assert(t, r.reader == nil) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + + // Ensure it can read data. 
+ labelNames, err := r.LabelNames() + testutil.Ok(t, err) + testutil.Equals(t, []string{"a"}, labelNames) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + }) + } } func TestLazyBinaryReader_ShouldReopenOnUsageAfterClose(t *testing.T) { @@ -131,37 +161,41 @@ func TestLazyBinaryReader_ShouldReopenOnUsageAfterClose(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) - m := NewLazyBinaryReaderMetrics(nil) - bm := NewBinaryReaderMetrics(nil) - r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil) - testutil.Ok(t, err) - testutil.Assert(t, r.reader == nil) - - // Should lazy load the index upon first usage. - labelNames, err := r.LabelNames() - testutil.Ok(t, err) - testutil.Equals(t, []string{"a"}, labelNames) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - - // Close it. - testutil.Ok(t, r.Close()) - testutil.Assert(t, r.reader == nil) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.unloadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) - - // Should lazy load again upon next usage. - labelNames, err = r.LabelNames() - testutil.Ok(t, err) - testutil.Equals(t, []string{"a"}, labelNames) - testutil.Equals(t, float64(2), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - - // Closing an already closed lazy reader should be a no-op. 
- for i := 0; i < 2; i++ { - testutil.Ok(t, r.Close()) - testutil.Equals(t, float64(2), promtestutil.ToFloat64(m.unloadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + for _, lazyDownload := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { + m := NewLazyBinaryReaderMetrics(nil) + bm := NewBinaryReaderMetrics(nil) + r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil, lazyDownload) + testutil.Ok(t, err) + testutil.Assert(t, r.reader == nil) + + // Should lazy load the index upon first usage. + labelNames, err := r.LabelNames() + testutil.Ok(t, err) + testutil.Equals(t, []string{"a"}, labelNames) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + + // Close it. + testutil.Ok(t, r.Close()) + testutil.Assert(t, r.reader == nil) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.unloadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + + // Should lazy load again upon next usage. + labelNames, err = r.LabelNames() + testutil.Ok(t, err) + testutil.Equals(t, []string{"a"}, labelNames) + testutil.Equals(t, float64(2), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + + // Closing an already closed lazy reader should be a no-op. 
+ for i := 0; i < 2; i++ { + testutil.Ok(t, r.Close()) + testutil.Equals(t, float64(2), promtestutil.ToFloat64(m.unloadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + } + }) } } @@ -182,34 +216,38 @@ func TestLazyBinaryReader_unload_ShouldReturnErrorIfNotIdle(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) - m := NewLazyBinaryReaderMetrics(nil) - bm := NewBinaryReaderMetrics(nil) - r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil) - testutil.Ok(t, err) - testutil.Assert(t, r.reader == nil) - - // Should lazy load the index upon first usage. - labelNames, err := r.LabelNames() - testutil.Ok(t, err) - testutil.Equals(t, []string{"a"}, labelNames) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) - - // Try to unload but not idle since enough time. - testutil.Equals(t, errNotIdle, r.unloadIfIdleSince(time.Now().Add(-time.Minute).UnixNano())) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) - - // Try to unload and idle since enough time. 
- testutil.Ok(t, r.unloadIfIdleSince(time.Now().UnixNano())) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) - testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.unloadCount)) - testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + for _, lazyDownload := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { + m := NewLazyBinaryReaderMetrics(nil) + bm := NewBinaryReaderMetrics(nil) + r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil, lazyDownload) + testutil.Ok(t, err) + testutil.Assert(t, r.reader == nil) + + // Should lazy load the index upon first usage. + labelNames, err := r.LabelNames() + testutil.Ok(t, err) + testutil.Equals(t, []string{"a"}, labelNames) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + + // Try to unload but not idle since enough time. + testutil.Equals(t, errNotIdle, r.unloadIfIdleSince(time.Now().Add(-time.Minute).UnixNano())) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + + // Try to unload and idle since enough time. 
+ testutil.Ok(t, r.unloadIfIdleSince(time.Now().UnixNano())) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.loadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.loadFailedCount)) + testutil.Equals(t, float64(1), promtestutil.ToFloat64(m.unloadCount)) + testutil.Equals(t, float64(0), promtestutil.ToFloat64(m.unloadFailedCount)) + }) + } } func TestLazyBinaryReader_LoadUnloadRaceCondition(t *testing.T) { @@ -232,49 +270,53 @@ func TestLazyBinaryReader_LoadUnloadRaceCondition(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) - m := NewLazyBinaryReaderMetrics(nil) - bm := NewBinaryReaderMetrics(nil) - r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil) - testutil.Ok(t, err) - testutil.Assert(t, r.reader == nil) - t.Cleanup(func() { - testutil.Ok(t, r.Close()) - }) - - done := make(chan struct{}) - time.AfterFunc(runDuration, func() { close(done) }) - wg := sync.WaitGroup{} - wg.Add(2) - - // Start a goroutine which continuously try to unload the reader. - go func() { - defer wg.Done() - - for { - select { - case <-done: - return - default: - testutil.Ok(t, r.unloadIfIdleSince(0)) - } - } - }() - - // Try to read multiple times, while the other goroutine continuously try to unload it. - go func() { - defer wg.Done() - - for { - select { - case <-done: - return - default: - _, err := r.PostingsOffset("a", "1") - testutil.Assert(t, err == nil || err == errUnloadedWhileLoading) - } - } - }() - - // Wait until both goroutines have done. 
- wg.Wait() + for _, lazyDownload := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyDownload=%v", lazyDownload), func(t *testing.T) { + m := NewLazyBinaryReaderMetrics(nil) + bm := NewBinaryReaderMetrics(nil) + r, err := NewLazyBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, m, bm, nil, lazyDownload) + testutil.Ok(t, err) + testutil.Assert(t, r.reader == nil) + t.Cleanup(func() { + testutil.Ok(t, r.Close()) + }) + + done := make(chan struct{}) + time.AfterFunc(runDuration, func() { close(done) }) + wg := sync.WaitGroup{} + wg.Add(2) + + // Start a goroutine which continuously try to unload the reader. + go func() { + defer wg.Done() + + for { + select { + case <-done: + return + default: + testutil.Ok(t, r.unloadIfIdleSince(0)) + } + } + }() + + // Try to read multiple times, while the other goroutine continuously try to unload it. + go func() { + defer wg.Done() + + for { + select { + case <-done: + return + default: + _, err := r.PostingsOffset("a", "1") + testutil.Assert(t, err == nil || err == errUnloadedWhileLoading) + } + } + }() + + // Wait until both goroutines have done. + wg.Wait() + }) + } } diff --git a/pkg/block/indexheader/reader_pool.go b/pkg/block/indexheader/reader_pool.go index fc8cb26813..e9fe5eb7dc 100644 --- a/pkg/block/indexheader/reader_pool.go +++ b/pkg/block/indexheader/reader_pool.go @@ -14,6 +14,8 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" + + "github.com/thanos-io/thanos/pkg/block/metadata" ) // ReaderPoolMetrics holds metrics tracked by ReaderPool. @@ -46,10 +48,47 @@ type ReaderPool struct { // Keep track of all readers managed by the pool. lazyReadersMx sync.Mutex lazyReaders map[*LazyBinaryReader]struct{} + + lazyDownloadFunc LazyDownloadIndexHeaderFunc +} + +// IndexHeaderLazyDownloadStrategy specifies how to download index headers +// lazily. Only used when lazy mmap is enabled. 
+type IndexHeaderLazyDownloadStrategy string + +const ( + // EagerDownloadStrategy always disables lazy downloading index headers. + EagerDownloadStrategy IndexHeaderLazyDownloadStrategy = "eager" + // LazyDownloadStrategy always lazily download index headers. + LazyDownloadStrategy IndexHeaderLazyDownloadStrategy = "lazy" +) + +func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIndexHeaderFunc { + switch s { + case LazyDownloadStrategy: + return AlwaysLazyDownloadIndexHeader + default: + // Always fallback to eager download index header. + return AlwaysEagerDownloadIndexHeader + } +} + +// LazyDownloadIndexHeaderFunc is used to determinte whether to download the index header lazily +// or not by checking its block metadata. Usecase can be by time or by index file size. +type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool + +// AlwaysEagerDownloadIndexHeader always eagerly download index header. +func AlwaysEagerDownloadIndexHeader(meta *metadata.Meta) bool { + return false +} + +// AlwaysLazyDownloadIndexHeader always lazily download index header. +func AlwaysLazyDownloadIndexHeader(meta *metadata.Meta) bool { + return true } // NewReaderPool makes a new ReaderPool. -func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, metrics *ReaderPoolMetrics) *ReaderPool { +func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, metrics *ReaderPoolMetrics, lazyDownloadFunc LazyDownloadIndexHeaderFunc) *ReaderPool { p := &ReaderPool{ logger: logger, metrics: metrics, @@ -57,6 +96,7 @@ func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTime lazyReaderIdleTimeout: lazyReaderIdleTimeout, lazyReaders: make(map[*LazyBinaryReader]struct{}), close: make(chan struct{}), + lazyDownloadFunc: lazyDownloadFunc, } // Start a goroutine to close idle readers (only if required). 
@@ -81,12 +121,12 @@ func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTime // NewBinaryReader creates and returns a new binary reader. If the pool has been configured // with lazy reader enabled, this function will return a lazy reader. The returned lazy reader // is tracked by the pool and automatically closed once the idle timeout expires. -func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (Reader, error) { +func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int, meta *metadata.Meta) (Reader, error) { var reader Reader var err error if p.lazyReaderEnabled { - reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed) + reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta)) } else { reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.binaryReader) } diff --git a/pkg/block/indexheader/reader_pool_test.go b/pkg/block/indexheader/reader_pool_test.go index 4ed60ea8fb..a7445f0fed 100644 --- a/pkg/block/indexheader/reader_pool_test.go +++ b/pkg/block/indexheader/reader_pool_test.go @@ -54,12 +54,15 @@ func TestReaderPool_NewBinaryReader(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + meta, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) + testutil.Ok(t, err) + for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - pool := NewReaderPool(log.NewNopLogger(), testData.lazyReaderEnabled, 
testData.lazyReaderIdleTimeout, NewReaderPoolMetrics(nil)) + pool := NewReaderPool(log.NewNopLogger(), testData.lazyReaderEnabled, testData.lazyReaderIdleTimeout, NewReaderPoolMetrics(nil), AlwaysEagerDownloadIndexHeader) defer pool.Close() - r, err := pool.NewBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3) + r, err := pool.NewBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, meta) testutil.Ok(t, err) defer func() { testutil.Ok(t, r.Close()) }() @@ -89,12 +92,14 @@ func TestReaderPool_ShouldCloseIdleLazyReaders(t *testing.T) { }, 100, 0, 1000, labels.Labels{{Name: "ext1", Value: "1"}}, 124, metadata.NoneFunc) testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(tmpDir, blockID.String()), metadata.NoneFunc)) + meta, err := metadata.ReadFromDir(filepath.Join(tmpDir, blockID.String())) + testutil.Ok(t, err) metrics := NewReaderPoolMetrics(nil) - pool := NewReaderPool(log.NewNopLogger(), true, idleTimeout, metrics) + pool := NewReaderPool(log.NewNopLogger(), true, idleTimeout, metrics, AlwaysEagerDownloadIndexHeader) defer pool.Close() - r, err := pool.NewBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3) + r, err := pool.NewBinaryReader(ctx, log.NewNopLogger(), bkt, tmpDir, blockID, 3, meta) testutil.Ok(t, err) defer func() { testutil.Ok(t, r.Close()) }() diff --git a/pkg/clientconfig/config.go b/pkg/clientconfig/config.go new file mode 100644 index 0000000000..9de1b4f580 --- /dev/null +++ b/pkg/clientconfig/config.go @@ -0,0 +1,99 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package clientconfig is a wrapper around github.com/prometheus/common/config with additional +// support for gRPC clients. +package clientconfig + +import ( + "fmt" + "net/url" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +// Config is a structure that allows pointing to various HTTP and GRPC endpoints, e.g. ruler connecting to queriers. 
+type Config struct { + HTTPConfig HTTPConfig `yaml:",inline"` + GRPCConfig *GRPCConfig `yaml:"grpc_config"` +} + +func DefaultConfig() Config { + return Config{ + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + Scheme: "http", + StaticAddresses: []string{}, + FileSDConfigs: []HTTPFileSDConfig{}, + }, + }, + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{}, + }, + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultConfig() + type plain Config + return unmarshal((*plain)(c)) +} + +// LoadConfigs loads a list of Config from YAML data. +func LoadConfigs(confYAML []byte) ([]Config, error) { + var clientCfg []Config + if err := yaml.UnmarshalStrict(confYAML, &clientCfg); err != nil { + return nil, err + } + return clientCfg, nil +} + +// BuildConfigFromHTTPAddresses returns a configuration from static addresses. +func BuildConfigFromHTTPAddresses(addrs []string) ([]Config, error) { + configs := make([]Config, 0, len(addrs)) + for i, addr := range addrs { + if addr == "" { + return nil, errors.Errorf("static address cannot be empty at index %d", i) + } + // If addr is missing schema, add http. + if !strings.Contains(addr, "://") { + addr = fmt.Sprintf("http://%s", addr) + } + u, err := url.Parse(addr) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse addr %q", addr) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.Errorf("%q is not supported scheme for address", u.Scheme) + } + configs = append(configs, Config{ + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + Scheme: u.Scheme, + StaticAddresses: []string{u.Host}, + PathPrefix: u.Path, + }, + }, + }) + } + return configs, nil +} + +// BuildConfigFromGRPCAddresses returns a configuration from a static addresses. 
+func BuildConfigFromGRPCAddresses(addrs []string) ([]Config, error) { + configs := make([]Config, 0, len(addrs)) + for i, addr := range addrs { + if addr == "" { + return nil, errors.Errorf("static address cannot be empty at index %d", i) + } + configs = append(configs, Config{ + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{addr}, + }, + }) + } + return configs, nil +} diff --git a/pkg/clientconfig/config_test.go b/pkg/clientconfig/config_test.go new file mode 100644 index 0000000000..44cdea6e72 --- /dev/null +++ b/pkg/clientconfig/config_test.go @@ -0,0 +1,162 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package clientconfig + +import ( + "testing" + + "github.com/efficientgo/core/testutil" +) + +func TestBuildHTTPConfig(t *testing.T) { + for _, tc := range []struct { + desc string + addresses []string + err bool + expected []Config + }{ + { + desc: "single addr without path", + addresses: []string{"localhost:9093"}, + expected: []Config{ + { + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + StaticAddresses: []string{"localhost:9093"}, + Scheme: "http", + }, + }, + }, + }, + }, + { + desc: "1st addr without path, 2nd with", + addresses: []string{"localhost:9093", "localhost:9094/prefix"}, + expected: []Config{ + { + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + StaticAddresses: []string{"localhost:9093"}, + Scheme: "http", + }, + }, + }, + { + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + StaticAddresses: []string{"localhost:9094"}, + Scheme: "http", + PathPrefix: "/prefix", + }, + }, + }, + }, + }, + { + desc: "single addr with path and http scheme", + addresses: []string{"http://localhost:9093"}, + expected: []Config{ + { + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + StaticAddresses: []string{"localhost:9093"}, + Scheme: "http", + }, + }, + }, + }, + }, + { + desc: "single addr with path and https scheme", + addresses: 
[]string{"https://localhost:9093"}, + expected: []Config{ + { + HTTPConfig: HTTPConfig{ + EndpointsConfig: HTTPEndpointsConfig{ + StaticAddresses: []string{"localhost:9093"}, + Scheme: "https", + }, + }, + }, + }, + }, + { + desc: "not supported scheme", + addresses: []string{"ttp://localhost:9093"}, + err: true, + }, + { + desc: "invalid addr", + addresses: []string{"this is not a valid addr"}, + err: true, + }, + { + desc: "empty addr", + addresses: []string{""}, + err: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + cfg, err := BuildConfigFromHTTPAddresses(tc.addresses) + if tc.err { + testutil.NotOk(t, err) + return + } + + testutil.Equals(t, tc.expected, cfg) + }) + } +} + +func TestBuildGRPCConfig(t *testing.T) { + for _, tc := range []struct { + desc string + addresses []string + err bool + expected []Config + }{ + { + desc: "single addr", + addresses: []string{"localhost:9093"}, + expected: []Config{ + { + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{"localhost:9093"}, + }, + }, + }, + }, + { + desc: "multiple addr", + addresses: []string{"localhost:9093", "localhost:9094"}, + expected: []Config{ + { + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{"localhost:9093"}, + }, + }, + { + GRPCConfig: &GRPCConfig{ + EndpointAddrs: []string{"localhost:9094"}, + }, + }, + }, + }, + { + desc: "empty addr", + addresses: []string{""}, + err: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + cfg, err := BuildConfigFromGRPCAddresses(tc.addresses) + if tc.err { + testutil.NotOk(t, err) + return + } + + testutil.Equals(t, tc.expected, cfg) + }) + } +} diff --git a/pkg/clientconfig/grpc.go b/pkg/clientconfig/grpc.go new file mode 100644 index 0000000000..e0987b6c8a --- /dev/null +++ b/pkg/clientconfig/grpc.go @@ -0,0 +1,8 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ +package clientconfig + +type GRPCConfig struct { + EndpointAddrs []string `yaml:"endpoint_addresses"` +} diff --git a/pkg/httpconfig/http.go b/pkg/clientconfig/http.go similarity index 85% rename from pkg/httpconfig/http.go rename to pkg/clientconfig/http.go index 4b71a03fda..b99dd9ef8f 100644 --- a/pkg/httpconfig/http.go +++ b/pkg/clientconfig/http.go @@ -1,8 +1,7 @@ // Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0. -// Package httpconfig is a wrapper around github.com/prometheus/common/config. -package httpconfig +package clientconfig import ( "context" @@ -15,6 +14,8 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/go-kit/log" @@ -30,8 +31,18 @@ import ( "github.com/thanos-io/thanos/pkg/discovery/cache" ) -// ClientConfig configures an HTTP client. -type ClientConfig struct { +// HTTPConfig is a structure that allows pointing to various HTTP endpoint, e.g ruler connecting to queriers. +type HTTPConfig struct { + HTTPClientConfig HTTPClientConfig `yaml:"http_config"` + EndpointsConfig HTTPEndpointsConfig `yaml:",inline"` +} + +func (c *HTTPConfig) NotEmpty() bool { + return len(c.EndpointsConfig.FileSDConfigs) > 0 || len(c.EndpointsConfig.StaticAddresses) > 0 +} + +// HTTPClientConfig configures an HTTP client. +type HTTPClientConfig struct { // The HTTP basic authentication credentials for the targets. BasicAuth BasicAuth `yaml:"basic_auth"` // The bearer token for the targets. @@ -75,7 +86,7 @@ func (b BasicAuth) IsZero() bool { return b.Username == "" && b.Password == "" && b.PasswordFile == "" } -// Transport configures client's transport properties. +// TransportConfig configures client's transport properties. 
type TransportConfig struct { MaxIdleConns int `yaml:"max_idle_conns"` MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` @@ -100,12 +111,12 @@ var defaultTransportConfig TransportConfig = TransportConfig{ DialerTimeout: int64(5 * time.Second), } -func NewDefaultClientConfig() ClientConfig { - return ClientConfig{TransportConfig: defaultTransportConfig} +func NewDefaultHTTPClientConfig() HTTPClientConfig { + return HTTPClientConfig{TransportConfig: defaultTransportConfig} } -func NewClientConfigFromYAML(cfg []byte) (*ClientConfig, error) { - conf := &ClientConfig{TransportConfig: defaultTransportConfig} +func NewHTTPClientConfigFromYAML(cfg []byte) (*HTTPClientConfig, error) { + conf := &HTTPClientConfig{TransportConfig: defaultTransportConfig} if err := yaml.Unmarshal(cfg, conf); err != nil { return nil, err } @@ -188,7 +199,7 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig } // NewHTTPClient returns a new HTTP client. -func NewHTTPClient(cfg ClientConfig, name string) (*http.Client, error) { +func NewHTTPClient(cfg HTTPClientConfig, name string) (*http.Client, error) { httpClientConfig := config_util.HTTPClientConfig{ BearerToken: config_util.Secret(cfg.BearerToken), BearerTokenFile: cfg.BearerTokenFile, @@ -272,13 +283,13 @@ func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error return u.rt.RoundTrip(r) } -// EndpointsConfig configures a cluster of HTTP endpoints from static addresses and +// HTTPEndpointsConfig configures a cluster of HTTP endpoints from static addresses and // file service discovery. -type EndpointsConfig struct { +type HTTPEndpointsConfig struct { // List of addresses with DNS prefixes. StaticAddresses []string `yaml:"static_configs"` // List of file configurations (our FileSD supports different DNS lookups). - FileSDConfigs []FileSDConfig `yaml:"file_sd_configs"` + FileSDConfigs []HTTPFileSDConfig `yaml:"file_sd_configs"` // The URL scheme to use when talking to targets. 
Scheme string `yaml:"scheme"` @@ -287,13 +298,13 @@ type EndpointsConfig struct { PathPrefix string `yaml:"path_prefix"` } -// FileSDConfig represents a file service discovery configuration. -type FileSDConfig struct { +// HTTPFileSDConfig represents a file service discovery configuration. +type HTTPFileSDConfig struct { Files []string `yaml:"files"` RefreshInterval model.Duration `yaml:"refresh_interval"` } -func (c FileSDConfig) convert() (file.SDConfig, error) { +func (c HTTPFileSDConfig) convert() (file.SDConfig, error) { var fileSDConfig file.SDConfig b, err := yaml.Marshal(c) if err != nil { @@ -308,8 +319,8 @@ type AddressProvider interface { Addresses() []string } -// Client represents a client that can send requests to a cluster of HTTP-based endpoints. -type Client struct { +// HTTPClient represents a client that can send requests to a cluster of HTTP-based endpoints. +type HTTPClient struct { logger log.Logger httpClient *http.Client @@ -324,7 +335,7 @@ type Client struct { } // NewClient returns a new Client. -func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, provider AddressProvider) (*Client, error) { +func NewClient(logger log.Logger, cfg HTTPEndpointsConfig, client *http.Client, provider AddressProvider) (*HTTPClient, error) { if logger == nil { logger = log.NewNopLogger() } @@ -335,9 +346,14 @@ func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, prov if err != nil { return nil, err } - discoverers = append(discoverers, file.NewDiscovery(&fileSDCfg, logger)) + // We provide an empty registry and ignore metrics for now. 
+ discovery, err := file.NewDiscovery(&fileSDCfg, logger, prometheus.NewRegistry()) + if err != nil { + return nil, err + } + discoverers = append(discoverers, discovery) } - return &Client{ + return &HTTPClient{ logger: logger, httpClient: client, scheme: cfg.Scheme, @@ -350,12 +366,12 @@ func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, prov } // Do executes an HTTP request with the underlying HTTP client. -func (c *Client) Do(req *http.Request) (*http.Response, error) { +func (c *HTTPClient) Do(req *http.Request) (*http.Response, error) { return c.httpClient.Do(req) } // Endpoints returns the list of known endpoints. -func (c *Client) Endpoints() []*url.URL { +func (c *HTTPClient) Endpoints() []*url.URL { var urls []*url.URL for _, addr := range c.provider.Addresses() { urls = append(urls, @@ -370,7 +386,7 @@ func (c *Client) Endpoints() []*url.URL { } // Discover runs the service to discover endpoints until the given context is done. -func (c *Client) Discover(ctx context.Context) { +func (c *HTTPClient) Discover(ctx context.Context) { var wg sync.WaitGroup ch := make(chan []*targetgroup.Group) @@ -400,6 +416,6 @@ func (c *Client) Discover(ctx context.Context) { } // Resolve refreshes and resolves the list of targets. -func (c *Client) Resolve(ctx context.Context) error { +func (c *HTTPClient) Resolve(ctx context.Context) error { return c.provider.Resolve(ctx, append(c.fileSDCache.Addresses(), c.staticAddresses...)) } diff --git a/pkg/httpconfig/config.go b/pkg/httpconfig/config.go deleted file mode 100644 index 3280e33378..0000000000 --- a/pkg/httpconfig/config.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package httpconfig - -import ( - "fmt" - "net/url" - "strings" - - "gopkg.in/yaml.v2" - - "github.com/pkg/errors" -) - -// Config is a structure that allows pointing to various HTTP endpoint, e.g ruler connecting to queriers. 
-type Config struct { - HTTPClientConfig ClientConfig `yaml:"http_config"` - EndpointsConfig EndpointsConfig `yaml:",inline"` -} - -func DefaultConfig() Config { - return Config{ - EndpointsConfig: EndpointsConfig{ - Scheme: "http", - StaticAddresses: []string{}, - FileSDConfigs: []FileSDConfig{}, - }, - } -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultConfig() - type plain Config - return unmarshal((*plain)(c)) -} - -// LoadConfigs loads a list of Config from YAML data. -func LoadConfigs(confYAML []byte) ([]Config, error) { - var queryCfg []Config - if err := yaml.UnmarshalStrict(confYAML, &queryCfg); err != nil { - return nil, err - } - return queryCfg, nil -} - -// BuildConfig returns a configuration from a static addresses. -func BuildConfig(addrs []string) ([]Config, error) { - configs := make([]Config, 0, len(addrs)) - for i, addr := range addrs { - if addr == "" { - return nil, errors.Errorf("static address cannot be empty at index %d", i) - } - // If addr is missing schema, add http. - if !strings.Contains(addr, "://") { - addr = fmt.Sprintf("http://%s", addr) - } - u, err := url.Parse(addr) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse addr %q", addr) - } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.Errorf("%q is not supported scheme for address", u.Scheme) - } - configs = append(configs, Config{ - EndpointsConfig: EndpointsConfig{ - Scheme: u.Scheme, - StaticAddresses: []string{u.Host}, - PathPrefix: u.Path, - }, - }) - } - return configs, nil -} diff --git a/pkg/httpconfig/config_test.go b/pkg/httpconfig/config_test.go deleted file mode 100644 index 4738c56544..0000000000 --- a/pkg/httpconfig/config_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package httpconfig - -import ( - "testing" - - "github.com/efficientgo/core/testutil" -) - -func TestBuildConfig(t *testing.T) { - for _, tc := range []struct { - desc string - addresses []string - err bool - expected []Config - }{ - { - desc: "single addr without path", - addresses: []string{"localhost:9093"}, - expected: []Config{{ - EndpointsConfig: EndpointsConfig{ - StaticAddresses: []string{"localhost:9093"}, - Scheme: "http", - }, - }}, - }, - { - desc: "1st addr without path, 2nd with", - addresses: []string{"localhost:9093", "localhost:9094/prefix"}, - expected: []Config{ - { - EndpointsConfig: EndpointsConfig{ - StaticAddresses: []string{"localhost:9093"}, - Scheme: "http", - }, - }, - { - EndpointsConfig: EndpointsConfig{ - StaticAddresses: []string{"localhost:9094"}, - Scheme: "http", - PathPrefix: "/prefix", - }, - }, - }, - }, - { - desc: "single addr with path and http scheme", - addresses: []string{"http://localhost:9093"}, - expected: []Config{{ - EndpointsConfig: EndpointsConfig{ - StaticAddresses: []string{"localhost:9093"}, - Scheme: "http", - }, - }}, - }, - { - desc: "single addr with path and https scheme", - addresses: []string{"https://localhost:9093"}, - expected: []Config{{ - EndpointsConfig: EndpointsConfig{ - StaticAddresses: []string{"localhost:9093"}, - Scheme: "https", - }, - }}, - }, - { - desc: "not supported scheme", - addresses: []string{"ttp://localhost:9093"}, - err: true, - }, - { - desc: "invalid addr", - addresses: []string{"this is not a valid addr"}, - err: true, - }, - { - desc: "empty addr", - addresses: []string{""}, - err: true, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - cfg, err := BuildConfig(tc.addresses) - if tc.err { - testutil.NotOk(t, err) - return - } - - testutil.Equals(t, tc.expected, cfg) - }) - } -} diff --git a/pkg/promclient/promclient.go b/pkg/promclient/promclient.go index 16c292d2f8..30cf85af40 100644 --- a/pkg/promclient/promclient.go +++ b/pkg/promclient/promclient.go @@ -33,8 +33,8 @@ 
import ( "google.golang.org/grpc/codes" "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" @@ -85,7 +85,7 @@ func NewClient(c HTTPClient, logger log.Logger, userAgent string) *Client { // NewDefaultClient returns Client with tracing tripperware. func NewDefaultClient() *Client { - client, _ := httpconfig.NewHTTPClient(httpconfig.ClientConfig{}, "") + client, _ := clientconfig.NewHTTPClient(clientconfig.HTTPClientConfig{}, "") return NewWithTracingClient( log.NewNopLogger(), client, diff --git a/pkg/query/remote_engine.go b/pkg/query/remote_engine.go index de926b250e..4cb0021083 100644 --- a/pkg/query/remote_engine.go +++ b/pkg/query/remote_engine.go @@ -76,7 +76,7 @@ func (r remoteEndpoints) Engines() []api.RemoteEngine { clients := r.getClients() engines := make([]api.RemoteEngine, len(clients)) for i := range clients { - engines[i] = newRemoteEngine(r.logger, clients[i], r.opts) + engines[i] = NewRemoteEngine(r.logger, clients[i], r.opts) } return engines } @@ -95,7 +95,7 @@ type remoteEngine struct { labelSets []labels.Labels } -func newRemoteEngine(logger log.Logger, queryClient Client, opts Opts) api.RemoteEngine { +func NewRemoteEngine(logger log.Logger, queryClient Client, opts Opts) *remoteEngine { return &remoteEngine{ logger: logger, client: queryClient, @@ -194,6 +194,19 @@ func (r *remoteEngine) NewRangeQuery(_ context.Context, opts promql.QueryOpts, q }, nil } +func (r *remoteEngine) NewInstantQuery(_ context.Context, _ promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + return &remoteQuery{ + logger: r.logger, + client: r.client, + opts: r.opts, + + qs: qs, + start: ts, + end: ts, + interval: 0, + }, nil +} + type remoteQuery struct { logger log.Logger client Client @@ -219,6 
+232,54 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result { maxResolution = int64(r.interval.Seconds() / 5) } + // Instant query. + if r.start == r.end { + request := &querypb.QueryRequest{ + Query: r.qs, + TimeSeconds: r.start.Unix(), + TimeoutSeconds: int64(r.opts.Timeout.Seconds()), + EnablePartialResponse: r.opts.EnablePartialResponse, + // TODO (fpetkovski): Allow specifying these parameters at query time. + // This will likely require a change in the remote engine interface. + ReplicaLabels: r.opts.ReplicaLabels, + MaxResolutionSeconds: maxResolution, + EnableDedup: true, + } + + qry, err := r.client.Query(qctx, request) + if err != nil { + return &promql.Result{Err: err} + } + result := make(promql.Vector, 0) + + for { + msg, err := qry.Recv() + if err == io.EOF { + break + } + if err != nil { + return &promql.Result{Err: err} + } + + if warn := msg.GetWarnings(); warn != "" { + return &promql.Result{Err: errors.New(warn)} + } + + ts := msg.GetTimeseries() + + // Point might have a different timestamp, force it to the evaluation + // timestamp as that is when we ran the evaluation. 
+ // See https://github.com/prometheus/prometheus/blob/b727e69b7601b069ded5c34348dca41b80988f4b/promql/engine.go#L693-L699 + if len(ts.Histograms) > 0 { + result = append(result, promql.Sample{Metric: labelpb.ZLabelsToPromLabels(ts.Labels), H: prompb.FromProtoHistogram(ts.Histograms[0]), T: r.start.UnixMilli()}) + } else { + result = append(result, promql.Sample{Metric: labelpb.ZLabelsToPromLabels(ts.Labels), F: ts.Samples[0].Value, T: r.start.UnixMilli()}) + } + } + + return &promql.Result{Value: result} + } + request := &querypb.QueryRangeRequest{ Query: r.qs, StartTimeSeconds: r.start.Unix(), diff --git a/pkg/query/remote_engine_test.go b/pkg/query/remote_engine_test.go index 7ecfdf2301..224acc6039 100644 --- a/pkg/query/remote_engine_test.go +++ b/pkg/query/remote_engine_test.go @@ -23,7 +23,7 @@ import ( func TestRemoteEngine_Warnings(t *testing.T) { client := NewClient(&queryWarnClient{}, "", nil) - engine := newRemoteEngine(log.NewNopLogger(), client, Opts{ + engine := NewRemoteEngine(log.NewNopLogger(), client, Opts{ Timeout: 1 * time.Second, }) var ( @@ -87,7 +87,7 @@ func TestRemoteEngine_LabelSets(t *testing.T) { for _, testCase := range tests { t.Run(testCase.name, func(t *testing.T) { client := NewClient(nil, "", testCase.tsdbInfos) - engine := newRemoteEngine(log.NewNopLogger(), client, Opts{ + engine := NewRemoteEngine(log.NewNopLogger(), client, Opts{ ReplicaLabels: testCase.replicaLabels, }) @@ -174,7 +174,7 @@ func TestRemoteEngine_MinT(t *testing.T) { for _, testCase := range tests { t.Run(testCase.name, func(t *testing.T) { client := NewClient(nil, "", testCase.tsdbInfos) - engine := newRemoteEngine(log.NewNopLogger(), client, Opts{ + engine := NewRemoteEngine(log.NewNopLogger(), client, Opts{ ReplicaLabels: testCase.replicaLabels, }) diff --git a/pkg/queryfrontend/config.go b/pkg/queryfrontend/config.go index ba45f80d9e..176052bd33 100644 --- a/pkg/queryfrontend/config.go +++ b/pkg/queryfrontend/config.go @@ -142,6 +142,7 @@ func 
NewCacheConfig(logger log.Logger, confContentYaml []byte) (*cortexcache.Con Timeout: config.Memcached.Timeout, MaxIdleConns: config.Memcached.MaxIdleConnections, Addresses: strings.Join(config.Memcached.Addresses, ","), + AutoDiscovery: config.Memcached.AutoDiscovery, UpdateInterval: config.Memcached.DNSProviderUpdateInterval, MaxItemSize: int(config.Memcached.MaxItemSize), }, @@ -208,6 +209,7 @@ type Config struct { TenantHeader string DefaultTenant string TenantCertField string + EnableXFunctions bool } // QueryRangeConfig holds the config for query range tripperware. diff --git a/pkg/receive/handler.go b/pkg/receive/handler.go index ec03fb79ef..c577eacb6e 100644 --- a/pkg/receive/handler.go +++ b/pkg/receive/handler.go @@ -106,7 +106,7 @@ type Handler struct { mtx sync.RWMutex hashring Hashring - peers *peerGroup + peers peersContainer receiverMode ReceiverMode forwardRequests *prometheus.CounterVec @@ -195,11 +195,21 @@ func NewHandler(logger log.Logger, o *Options) *Handler { ins := extpromhttp.NewNopInstrumentationMiddleware() if o.Registry != nil { + var buckets = []float64{0.001, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1, 2, 3, 4, 5} + + const bucketIncrement = 2.0 + for curMax := 5.0 + bucketIncrement; curMax < o.ForwardTimeout.Seconds(); curMax += bucketIncrement { + buckets = append(buckets, curMax) + } + if buckets[len(buckets)-1] < o.ForwardTimeout.Seconds() { + buckets = append(buckets, o.ForwardTimeout.Seconds()) + } + ins = extpromhttp.NewTenantInstrumentationMiddleware( o.TenantHeader, o.DefaultTenantID, o.Registry, - []float64{0.001, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1, 2, 3, 4, 5}, + buckets, ) } @@ -242,10 +252,48 @@ func (h *Handler) Hashring(hashring Hashring) { h.mtx.Lock() defer h.mtx.Unlock() + if h.hashring != nil { + previousNodes := h.hashring.Nodes() + newNodes := hashring.Nodes() + + disappearedNodes := getSortedStringSliceDiff(previousNodes, 
newNodes) + for _, node := range disappearedNodes { + if err := h.peers.close(node); err != nil { + level.Error(h.logger).Log("msg", "closing gRPC connection failed, we might have leaked a file descriptor", "addr", node, "err", err.Error()) + } + } + } + h.hashring = hashring h.peers.resetBackoff() } +// getSortedStringSliceDiff returns items which are in slice1 but not in slice2. +// The returned slice also only contains unique items i.e. it is a set. +func getSortedStringSliceDiff(slice1, slice2 []string) []string { + slice1Items := make(map[string]struct{}, len(slice1)) + slice2Items := make(map[string]struct{}, len(slice2)) + + for _, s1 := range slice1 { + slice1Items[s1] = struct{}{} + } + for _, s2 := range slice2 { + slice2Items[s2] = struct{}{} + } + + var difference = make([]string, 0) + for s1 := range slice1Items { + _, s2Contains := slice2Items[s1] + if s2Contains { + continue + } + difference = append(difference, s1) + } + sort.Strings(difference) + + return difference +} + // Verifies whether the server is ready or not. 
func (h *Handler) isReady() bool { h.mtx.RLock() @@ -1144,10 +1192,10 @@ func newReplicationErrors(threshold, numErrors int) []*replicationErrors { return errs } -func newPeerGroup(backoff backoff.Backoff, dialOpts ...grpc.DialOption) *peerGroup { +func newPeerGroup(backoff backoff.Backoff, dialOpts ...grpc.DialOption) peersContainer { return &peerGroup{ dialOpts: dialOpts, - cache: map[string]storepb.WriteableStoreClient{}, + cache: map[string]*grpc.ClientConn{}, m: sync.RWMutex{}, dialer: grpc.DialContext, peerStates: make(map[string]*retryState), @@ -1155,9 +1203,14 @@ func newPeerGroup(backoff backoff.Backoff, dialOpts ...grpc.DialOption) *peerGro } } +type peersContainer interface { + close(string) error + get(context.Context, string) (storepb.WriteableStoreClient, error) +} + type peerGroup struct { dialOpts []grpc.DialOption - cache map[string]storepb.WriteableStoreClient + cache map[string]*grpc.ClientConn peerStates map[string]*retryState expBackoff backoff.Backoff @@ -1167,13 +1220,32 @@ type peerGroup struct { dialer func(ctx context.Context, target string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) } +func (p *peerGroup) close(addr string) error { + p.m.Lock() + defer p.m.Unlock() + + c, ok := p.cache[addr] + if !ok { + // NOTE(GiedriusS): this could be valid case when the connection + // was never established. + return nil + } + + delete(p.cache, addr) + if err := c.Close(); err != nil { + return fmt.Errorf("closing connection for %s", addr) + } + + return nil +} + func (p *peerGroup) get(ctx context.Context, addr string) (storepb.WriteableStoreClient, error) { // use a RLock first to prevent blocking if we don't need to. 
p.m.RLock() c, ok := p.cache[addr] p.m.RUnlock() if ok { - return c, nil + return storepb.NewWriteableStoreClient(c), nil } p.m.Lock() @@ -1181,16 +1253,15 @@ func (p *peerGroup) get(ctx context.Context, addr string) (storepb.WriteableStor // Make sure that another caller hasn't created the connection since obtaining the write lock. c, ok = p.cache[addr] if ok { - return c, nil + return storepb.NewWriteableStoreClient(c), nil } conn, err := p.dialer(ctx, addr, p.dialOpts...) if err != nil { return nil, errors.Wrap(err, "failed to dial peer") } - client := storepb.NewWriteableStoreClient(conn) - p.cache[addr] = client - return client, nil + p.cache[addr] = conn + return storepb.NewWriteableStoreClient(conn), nil } func (p *peerGroup) markPeerDown(addr string) { diff --git a/pkg/receive/handler_test.go b/pkg/receive/handler_test.go index 0ff92682e0..de3bfc2303 100644 --- a/pkg/receive/handler_test.go +++ b/pkg/receive/handler_test.go @@ -163,24 +163,42 @@ func (f *fakeAppender) Rollback() error { return f.rollbackErr() } +func (f *fakeAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + panic("not implemented") +} + +type fakePeersGroup struct { + clients map[string]storepb.WriteableStoreClient + + closeCalled map[string]bool +} + +func (g *fakePeersGroup) close(addr string) error { + if g.closeCalled == nil { + g.closeCalled = map[string]bool{} + } + g.closeCalled[addr] = true + return nil +} + +func (g *fakePeersGroup) get(_ context.Context, addr string) (storepb.WriteableStoreClient, error) { + c, ok := g.clients[addr] + if !ok { + return nil, fmt.Errorf("client %s not found", addr) + } + return c, nil +} + +var _ = (peersContainer)(&fakePeersGroup{}) + func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uint64, hashringAlgo HashringAlgorithm) ([]*Handler, Hashring, error) { var ( cfg = []HashringConfig{{Hashring: "test"}} handlers []*Handler wOpts = &WriterOptions{} ) - // create 
a fake peer group where we manually fill the cache with fake addresses pointed to our handlers - // This removes the network from the tests and creates a more consistent testing harness. - peers := &peerGroup{ - dialOpts: nil, - m: sync.RWMutex{}, - cache: map[string]storepb.WriteableStoreClient{}, - dialer: func(context.Context, string, ...grpc.DialOption) (*grpc.ClientConn, error) { - // dialer should never be called since we are creating fake clients with fake addresses - // this protects against some leaking test that may attempt to dial random IP addresses - // which may pose a security risk. - return nil, errors.New("unexpected dial called in testing") - }, + fakePeers := &fakePeersGroup{ + clients: map[string]storepb.WriteableStoreClient{}, } ag := addrGen{} @@ -196,11 +214,11 @@ func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uin Limiter: limiter, }) handlers = append(handlers, h) - h.peers = peers addr := ag.newAddr() + h.peers = fakePeers + fakePeers.clients[addr] = &fakeRemoteWriteGRPCServer{h: h} h.options.Endpoint = addr cfg[0].Endpoints = append(cfg[0].Endpoints, Endpoint{Address: h.options.Endpoint}) - peers.cache[addr] = &fakeRemoteWriteGRPCServer{h: h} } // Use hashmod as default. 
if hashringAlgo == "" { @@ -1587,3 +1605,35 @@ func TestGetStatsLimitParameter(t *testing.T) { testutil.Equals(t, limit, givenLimit) }) } + +func TestSortedSliceDiff(t *testing.T) { + testutil.Equals(t, []string{"a"}, getSortedStringSliceDiff([]string{"a", "a", "foo"}, []string{"b", "b", "foo"})) + testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{"b", "b", "foo"})) + testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{})) +} + +func TestHashringChangeCallsClose(t *testing.T) { + appendables := []*fakeAppendable{ + { + appender: newFakeAppender(nil, nil, nil), + }, + { + appender: newFakeAppender(nil, nil, nil), + }, + { + appender: newFakeAppender(nil, nil, nil), + }, + } + allHandlers, _, err := newTestHandlerHashring(appendables, 3, AlgorithmHashmod) + testutil.Ok(t, err) + + appendables = appendables[1:] + + _, smallHashring, err := newTestHandlerHashring(appendables, 2, AlgorithmHashmod) + testutil.Ok(t, err) + + allHandlers[0].Hashring(smallHashring) + + pg := allHandlers[0].peers.(*fakePeersGroup) + testutil.Assert(t, len(pg.closeCalled) > 0) +} diff --git a/pkg/receive/hashring.go b/pkg/receive/hashring.go index 18925cc4cc..0d7c2dc10c 100644 --- a/pkg/receive/hashring.go +++ b/pkg/receive/hashring.go @@ -55,6 +55,9 @@ type Hashring interface { Get(tenant string, timeSeries *prompb.TimeSeries) (string, error) // GetN returns the nth node that should handle the given tenant and time series. GetN(tenant string, timeSeries *prompb.TimeSeries, n uint64) (string, error) + // Nodes returns a sorted slice of nodes that are in this hashring. Addresses could be duplicated + // if, for example, the same address is used for multiple tenants in the multi-hashring. + Nodes() []string } // SingleNodeHashring always returns the same node. 
@@ -65,6 +68,10 @@ func (s SingleNodeHashring) Get(tenant string, ts *prompb.TimeSeries) (string, e return s.GetN(tenant, ts, 0) } +func (s SingleNodeHashring) Nodes() []string { + return []string{string(s)} +} + // GetN implements the Hashring interface. func (s SingleNodeHashring) GetN(_ string, _ *prompb.TimeSeries, n uint64) (string, error) { if n > 0 { @@ -84,9 +91,15 @@ func newSimpleHashring(endpoints []Endpoint) (Hashring, error) { } addresses[i] = endpoints[i].Address } + sort.Strings(addresses) + return simpleHashring(addresses), nil } +func (s simpleHashring) Nodes() []string { + return s +} + // Get returns a target to handle the given tenant and time series. func (s simpleHashring) Get(tenant string, ts *prompb.TimeSeries) (string, error) { return s.GetN(tenant, ts, 0) @@ -120,6 +133,7 @@ type ketamaHashring struct { endpoints []Endpoint sections sections numEndpoints uint64 + nodes []string } func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFactor uint64) (*ketamaHashring, error) { @@ -132,8 +146,11 @@ func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFac hash := xxhash.New() availabilityZones := make(map[string]struct{}) ringSections := make(sections, 0, numSections) + + nodes := []string{} for endpointIndex, endpoint := range endpoints { availabilityZones[endpoint.AZ] = struct{}{} + nodes = append(nodes, endpoint.Address) for i := 1; i <= sectionsPerNode; i++ { _, _ = hash.Write([]byte(endpoint.Address + ":" + strconv.Itoa(i))) n := §ion{ @@ -148,15 +165,21 @@ func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFac } } sort.Sort(ringSections) + sort.Strings(nodes) calculateSectionReplicas(ringSections, replicationFactor, availabilityZones) return &ketamaHashring{ endpoints: endpoints, sections: ringSections, numEndpoints: uint64(len(endpoints)), + nodes: nodes, }, nil } +func (k *ketamaHashring) Nodes() []string { + return k.nodes +} + func sizeOfLeastOccupiedAZ(azSpread 
map[string]int64) int64 { minValue := int64(math.MaxInt64) for _, value := range azSpread { @@ -232,6 +255,8 @@ type multiHashring struct { // to the cache map, as this is both written to // and read from. mu sync.RWMutex + + nodes []string } // Get returns a target to handle the given tenant and time series. @@ -269,6 +294,10 @@ func (m *multiHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (st return "", errors.New("no matching hashring to handle tenant") } +func (m *multiHashring) Nodes() []string { + return m.nodes +} + // newMultiHashring creates a multi-tenant hashring for a given slice of // groups. // Which hashring to use for a tenant is determined @@ -289,6 +318,7 @@ func NewMultiHashring(algorithm HashringAlgorithm, replicationFactor uint64, cfg if err != nil { return nil, err } + m.nodes = append(m.nodes, hashring.Nodes()...) m.hashrings = append(m.hashrings, hashring) var t map[string]struct{} if len(h.Tenants) != 0 { @@ -299,6 +329,7 @@ func NewMultiHashring(algorithm HashringAlgorithm, replicationFactor uint64, cfg } m.tenantSets = append(m.tenantSets, t) } + sort.Strings(m.nodes) return m, nil } diff --git a/pkg/receive/head_series_limiter.go b/pkg/receive/head_series_limiter.go index 994221c2f6..492e3ecdf2 100644 --- a/pkg/receive/head_series_limiter.go +++ b/pkg/receive/head_series_limiter.go @@ -14,8 +14,9 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/errors" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/promclient" ) @@ -86,13 +87,13 @@ func NewHeadSeriesLimit(w WriteLimitsConfig, registerer prometheus.Registerer, l limit.tenantCurrentSeriesMap = map[string]float64{} // Use specified HTTPConfig (if any) to make requests to meta-monitoring. 
- c := httpconfig.NewDefaultClientConfig() + c := clientconfig.NewDefaultHTTPClientConfig() if w.GlobalLimits.MetaMonitoringHTTPClient != nil { c = *w.GlobalLimits.MetaMonitoringHTTPClient } var err error - limit.metaMonitoringClient, err = httpconfig.NewHTTPClient(c, "meta-mon-for-limit") + limit.metaMonitoringClient, err = clientconfig.NewHTTPClient(c, "meta-mon-for-limit") if err != nil { level.Error(logger).Log("msg", "improper http client config", "err", err.Error()) } @@ -104,7 +105,7 @@ func NewHeadSeriesLimit(w WriteLimitsConfig, registerer prometheus.Registerer, l // solution with the configured query for getting current active (head) series of all tenants. // It then populates tenantCurrentSeries map with result. func (h *headSeriesLimit) QueryMetaMonitoring(ctx context.Context) error { - c := promclient.NewWithTracingClient(h.logger, h.metaMonitoringClient, httpconfig.ThanosUserAgent) + c := promclient.NewWithTracingClient(h.logger, h.metaMonitoringClient, clientconfig.ThanosUserAgent) vectorRes, _, _, err := c.QueryInstant(ctx, h.metaMonitoringURL, h.metaMonitoringQuery, time.Now(), promclient.QueryOptions{Deduplicate: true}) if err != nil { diff --git a/pkg/receive/limiter_config.go b/pkg/receive/limiter_config.go index c3bd330b6e..cce9af3cd1 100644 --- a/pkg/receive/limiter_config.go +++ b/pkg/receive/limiter_config.go @@ -6,9 +6,10 @@ package receive import ( "net/url" - "github.com/thanos-io/thanos/pkg/errors" - "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" + + "github.com/thanos-io/thanos/pkg/clientconfig" + "github.com/thanos-io/thanos/pkg/errors" ) // RootLimitsConfig is the root configuration for limits. @@ -64,9 +65,9 @@ type GlobalLimitsConfig struct { // MaxConcurrency represents the maximum concurrency during write operations. MaxConcurrency int64 `yaml:"max_concurrency"` // MetaMonitoring options specify the query, url and client for Query API address used in head series limiting. 
- MetaMonitoringURL string `yaml:"meta_monitoring_url"` - MetaMonitoringHTTPClient *httpconfig.ClientConfig `yaml:"meta_monitoring_http_client"` - MetaMonitoringLimitQuery string `yaml:"meta_monitoring_limit_query"` + MetaMonitoringURL string `yaml:"meta_monitoring_url"` + MetaMonitoringHTTPClient *clientconfig.HTTPClientConfig `yaml:"meta_monitoring_http_client"` + MetaMonitoringLimitQuery string `yaml:"meta_monitoring_limit_query"` metaMonitoringURL *url.URL } diff --git a/pkg/rules/manager_test.go b/pkg/rules/manager_test.go index 7c36a56da2..2bee9a698e 100644 --- a/pkg/rules/manager_test.go +++ b/pkg/rules/manager_test.go @@ -42,6 +42,7 @@ type nopAppender struct{} func (n nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { return 0, nil } + func (n nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { return 0, nil } @@ -50,6 +51,10 @@ func (n nopAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t i return 0, nil } +func (n nopAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return 0, nil +} + func (n nopAppender) Commit() error { return nil } func (n nopAppender) Rollback() error { return nil } func (n nopAppender) Appender(_ context.Context) (storage.Appender, error) { return n, nil } diff --git a/pkg/rules/queryable.go b/pkg/rules/queryable.go index d178df7ba1..3d2335fd5a 100644 --- a/pkg/rules/queryable.go +++ b/pkg/rules/queryable.go @@ -20,7 +20,7 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/thanos/internal/cortex/querier/series" - "github.com/thanos-io/thanos/pkg/httpconfig" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/store/storepb" ) @@ -31,7 +31,7 @@ type promClientsQueryable struct { logger log.Logger promClients []*promclient.Client - queryClients 
[]*httpconfig.Client + queryClients []*clientconfig.HTTPClient ignoredLabelNames []string duplicatedQuery prometheus.Counter @@ -43,7 +43,7 @@ type promClientsQuerier struct { logger log.Logger promClients []*promclient.Client - queryClients []*httpconfig.Client + queryClients []*clientconfig.HTTPClient restoreIgnoreLabels []string // We use a dummy counter here because the duplicated @@ -52,7 +52,7 @@ type promClientsQuerier struct { } // NewPromClientsQueryable creates a queryable that queries queriers from Prometheus clients. -func NewPromClientsQueryable(logger log.Logger, queryClients []*httpconfig.Client, promClients []*promclient.Client, +func NewPromClientsQueryable(logger log.Logger, queryClients []*clientconfig.HTTPClient, promClients []*promclient.Client, httpMethod string, step time.Duration, ignoredLabelNames []string) *promClientsQueryable { return &promClientsQueryable{ logger: logger, diff --git a/pkg/store/acceptance_test.go b/pkg/store/acceptance_test.go index 6bfe2da421..34abc649ab 100644 --- a/pkg/store/acceptance_test.go +++ b/pkg/store/acceptance_test.go @@ -6,28 +6,33 @@ package store import ( "context" "fmt" + "math" "net/url" "os" "path/filepath" "testing" "time" + "github.com/efficientgo/core/testutil" "github.com/go-kit/log" "github.com/pkg/errors" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" - "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/efficientgo/core/testutil" "github.com/thanos-io/objstore" "github.com/thanos-io/objstore/providers/filesystem" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/promclient" + "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" + 
"github.com/thanos-io/thanos/pkg/store/storepb/prompb" "github.com/thanos-io/thanos/pkg/testutil/custom" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) @@ -62,8 +67,10 @@ type seriesCallCase struct { expectErr error } +type startStoreFn func(t *testing.T, extLset labels.Labels, append func(app storage.Appender)) storepb.StoreServer + // testStoreAPIsAcceptance tests StoreAPI from closed box perspective. -func testStoreAPIsAcceptance(t *testing.T, startStore func(t *testing.T, extLset labels.Labels, append func(app storage.Appender)) storepb.StoreServer) { +func testStoreAPIsAcceptance(t *testing.T, startStore startStoreFn) { t.Helper() now := time.Now() @@ -232,7 +239,6 @@ func testStoreAPIsAcceptance(t *testing.T, startStore func(t *testing.T, extLset appendFn: func(app storage.Appender) { _, err := app.Append(0, labels.FromStrings("foo", "bar", "region", "somewhere"), 0, 0) testutil.Ok(t, err) - testutil.Ok(t, app.Commit()) }, seriesCalls: []seriesCallCase{ @@ -743,9 +749,9 @@ func testStoreAPIsAcceptance(t *testing.T, startStore func(t *testing.T, extLset } testutil.Ok(t, err) - testutil.Equals(t, true, slices.IsSortedFunc(srv.SeriesSet, func(x, y storepb.Series) int { + testutil.Assert(t, slices.IsSortedFunc(srv.SeriesSet, func(x, y storepb.Series) int { return labels.Compare(x.PromLabels(), y.PromLabels()) - })) + }), "Unsorted Series response returned") receivedLabels := make([]labels.Labels, 0) for _, s := range srv.SeriesSet { @@ -759,12 +765,70 @@ func testStoreAPIsAcceptance(t *testing.T, startStore func(t *testing.T, extLset } } +// Regression test for https://github.com/thanos-io/thanos/issues/396. +// Note: Only TSDB and Prometheus Stores do this. 
+func testStoreAPIsSeriesSplitSamplesIntoChunksWithMaxSizeOf120(t *testing.T, startStore startStoreFn) { + t.Run("should split into chunks of max size 120", func(t *testing.T) { + baseT := timestamp.FromTime(time.Now().AddDate(0, 0, -2)) / 1000 * 1000 + offset := int64(2*math.MaxUint16 + 5) + + extLset := labels.FromStrings("region", "eu-west") + appendFn := func(app storage.Appender) { + + var ( + ref storage.SeriesRef + err error + ) + for i := int64(0); i < offset; i++ { + ref, err = app.Append(ref, labels.FromStrings("a", "b"), baseT+i, 1) + testutil.Ok(t, err) + } + testutil.Ok(t, app.Commit()) + + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := startStore(t, extLset, appendFn) + srv := newStoreSeriesServer(ctx) + + testutil.Ok(t, client.Series(&storepb.SeriesRequest{ + MinTime: baseT, + MaxTime: baseT + offset, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "b"}, + {Type: storepb.LabelMatcher_EQ, Name: "region", Value: "eu-west"}, + }, + }, srv)) + + testutil.Equals(t, 1, len(srv.SeriesSet)) + + firstSeries := srv.SeriesSet[0] + + testutil.Equals(t, []labelpb.ZLabel{ + {Name: "a", Value: "b"}, + {Name: "region", Value: "eu-west"}, + }, firstSeries.Labels) + + testutil.Equals(t, 1093, len(firstSeries.Chunks)) + for i := 0; i < len(firstSeries.Chunks)-1; i++ { + chunk, err := chunkenc.FromData(chunkenc.EncXOR, firstSeries.Chunks[i].Raw.Data) + testutil.Ok(t, err) + testutil.Equals(t, 120, chunk.NumSamples()) + } + + chunk, err := chunkenc.FromData(chunkenc.EncXOR, firstSeries.Chunks[len(firstSeries.Chunks)-1].Raw.Data) + testutil.Ok(t, err) + testutil.Equals(t, 35, chunk.NumSamples()) + }) +} + func TestBucketStore_Acceptance(t *testing.T) { t.Cleanup(func() { custom.TolerantVerifyLeak(t) }) ctx := context.Background() - for _, lazyExpandedPosting := range []bool{false, true} { - testStoreAPIsAcceptance(t, func(tt *testing.T, extLset labels.Labels, appendFn func(app 
storage.Appender)) storepb.StoreServer { + startStore := func(lazyExpandedPostings bool) func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { + return func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { tmpDir := tt.TempDir() bktDir := filepath.Join(tmpDir, "bkt") auxDir := filepath.Join(tmpDir, "aux") @@ -835,7 +899,7 @@ func TestBucketStore_Acceptance(t *testing.T) { 1*time.Minute, WithChunkPool(chunkPool), WithFilterConfig(allowAllFilterConf), - WithLazyExpandedPostings(lazyExpandedPosting), + WithLazyExpandedPostings(lazyExpandedPostings), ) testutil.Ok(tt, err) tt.Cleanup(func() { testutil.Ok(tt, bucketStore.Close()) }) @@ -843,6 +907,12 @@ func TestBucketStore_Acceptance(t *testing.T) { testutil.Ok(tt, bucketStore.SyncBlocks(context.Background())) return bucketStore + } + } + + for _, lazyExpandedPostings := range []bool{false, true} { + t.Run(fmt.Sprintf("lazyExpandedPostings:%t", lazyExpandedPostings), func(t *testing.T) { + testStoreAPIsAcceptance(t, startStore(lazyExpandedPostings)) }) } } @@ -850,7 +920,7 @@ func TestBucketStore_Acceptance(t *testing.T) { func TestPrometheusStore_Acceptance(t *testing.T) { t.Cleanup(func() { custom.TolerantVerifyLeak(t) }) - testStoreAPIsAcceptance(t, func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { + startStore := func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { p, err := e2eutil.NewPrometheus() testutil.Ok(tt, err) tt.Cleanup(func() { testutil.Ok(tt, p.Stop()) }) @@ -870,21 +940,28 @@ func TestPrometheusStore_Acceptance(t *testing.T) { func() string { return version }) testutil.Ok(tt, err) + // We build chunks only for SAMPLES method. Make sure we ask for SAMPLES only. 
+ promStore.remoteReadAcceptableResponses = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES} + return promStore - }) + } + + testStoreAPIsAcceptance(t, startStore) + testStoreAPIsSeriesSplitSamplesIntoChunksWithMaxSizeOf120(t, startStore) } func TestTSDBStore_Acceptance(t *testing.T) { t.Cleanup(func() { custom.TolerantVerifyLeak(t) }) - testStoreAPIsAcceptance(t, func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { + startStore := func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer { db, err := e2eutil.NewTSDB() testutil.Ok(tt, err) tt.Cleanup(func() { testutil.Ok(tt, db.Close()) }) + appendFn(db.Appender(context.Background())) - tsdbStore := NewTSDBStore(nil, db, component.Rule, extLset) + return NewTSDBStore(nil, db, component.Rule, extLset) + } - appendFn(db.Appender(context.Background())) - return tsdbStore - }) + testStoreAPIsAcceptance(t, startStore) + testStoreAPIsSeriesSplitSamplesIntoChunksWithMaxSizeOf120(t, startStore) } diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 3ebd6f06a4..fd4fb7392c 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -413,6 +413,8 @@ type BucketStore struct { blockEstimatedMaxSeriesFunc BlockEstimator blockEstimatedMaxChunkFunc BlockEstimator + + indexHeaderLazyDownloadStrategy indexheader.LazyDownloadIndexHeaderFunc } func (s *BucketStore) validate() error { @@ -531,6 +533,14 @@ func WithDontResort(true bool) BucketStoreOption { } } +// WithIndexHeaderLazyDownloadStrategy specifies what block to lazy download its index header. +// Only used when lazy mmap is enabled at the same time. +func WithIndexHeaderLazyDownloadStrategy(strategy indexheader.LazyDownloadIndexHeaderFunc) BucketStoreOption { + return func(s *BucketStore) { + s.indexHeaderLazyDownloadStrategy = strategy + } +} + // NewBucketStore creates a new bucket backed store that implements the store API against // an object store bucket. 
It is optimized to work against high latency backends. func NewBucketStore( @@ -559,21 +569,22 @@ func NewBucketStore( b := make([]byte, 0, initialBufSize) return &b }}, - chunkPool: pool.NoopBytes{}, - blocks: map[ulid.ULID]*bucketBlock{}, - blockSets: map[uint64]*bucketBlockSet{}, - blockSyncConcurrency: blockSyncConcurrency, - queryGate: gate.NewNoop(), - chunksLimiterFactory: chunksLimiterFactory, - seriesLimiterFactory: seriesLimiterFactory, - bytesLimiterFactory: bytesLimiterFactory, - partitioner: partitioner, - enableCompatibilityLabel: enableCompatibilityLabel, - postingOffsetsInMemSampling: postingOffsetsInMemSampling, - enableSeriesResponseHints: enableSeriesResponseHints, - enableChunkHashCalculation: enableChunkHashCalculation, - seriesBatchSize: SeriesBatchSize, - sortingStrategy: sortingStrategyStore, + chunkPool: pool.NoopBytes{}, + blocks: map[ulid.ULID]*bucketBlock{}, + blockSets: map[uint64]*bucketBlockSet{}, + blockSyncConcurrency: blockSyncConcurrency, + queryGate: gate.NewNoop(), + chunksLimiterFactory: chunksLimiterFactory, + seriesLimiterFactory: seriesLimiterFactory, + bytesLimiterFactory: bytesLimiterFactory, + partitioner: partitioner, + enableCompatibilityLabel: enableCompatibilityLabel, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + enableSeriesResponseHints: enableSeriesResponseHints, + enableChunkHashCalculation: enableChunkHashCalculation, + seriesBatchSize: SeriesBatchSize, + sortingStrategy: sortingStrategyStore, + indexHeaderLazyDownloadStrategy: indexheader.AlwaysEagerDownloadIndexHeader, } for _, option := range options { @@ -582,7 +593,7 @@ func NewBucketStore( // Depend on the options indexReaderPoolMetrics := indexheader.NewReaderPoolMetrics(extprom.WrapRegistererWithPrefix("thanos_bucket_store_", s.reg)) - s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics) + s.indexReaderPool = indexheader.NewReaderPool(s.logger, 
lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics, s.indexHeaderLazyDownloadStrategy) s.metrics = newBucketStoreMetrics(s.reg) // TODO(metalmatze): Might be possible via Option too if err := s.validate(); err != nil { @@ -759,6 +770,7 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er s.dir, meta.ULID, s.postingOffsetsInMemSampling, + meta, ) if err != nil { return errors.Wrap(err, "create index header reader") diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 87659f5450..67223a9467 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -1658,7 +1658,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { bkt: objstore.WithNoopInstr(bkt), logger: logger, indexCache: indexCache, - indexReaderPool: indexheader.NewReaderPool(log.NewNopLogger(), false, 0, indexheader.NewReaderPoolMetrics(nil)), + indexReaderPool: indexheader.NewReaderPool(log.NewNopLogger(), false, 0, indexheader.NewReaderPoolMetrics(nil), indexheader.AlwaysEagerDownloadIndexHeader), metrics: newBucketStoreMetrics(nil), blockSets: map[uint64]*bucketBlockSet{ labels.Labels{{Name: "ext1", Value: "1"}}.Hash(): {blocks: [][]*bucketBlock{{b1, b2}}}, diff --git a/pkg/store/prometheus.go b/pkg/store/prometheus.go index ea48f7e1a2..197364ca04 100644 --- a/pkg/store/prometheus.go +++ b/pkg/store/prometheus.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/dedup" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" @@ -578,7 +578,7 @@ func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que preq.Header.Set("Content-Type", "application/x-stream-protobuf") 
preq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") - preq.Header.Set("User-Agent", httpconfig.ThanosUserAgent) + preq.Header.Set("User-Agent", clientconfig.ThanosUserAgent) presp, err = p.client.Do(preq.WithContext(ctx)) if err != nil { return nil, errors.Wrap(err, "send request") diff --git a/pkg/store/prometheus_test.go b/pkg/store/prometheus_test.go index 5b7ff18736..079d1f2f4d 100644 --- a/pkg/store/prometheus_test.go +++ b/pkg/store/prometheus_test.go @@ -18,7 +18,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/efficientgo/core/testutil" @@ -27,7 +26,6 @@ import ( "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/store/storepb/prompb" "github.com/thanos-io/thanos/pkg/testutil/custom" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) @@ -520,80 +518,3 @@ func TestPrometheusStore_Info(t *testing.T) { testutil.Equals(t, int64(123), resp.MinTime) testutil.Equals(t, int64(456), resp.MaxTime) } - -func testSeries_SplitSamplesIntoChunksWithMaxSizeOf120(t *testing.T, appender storage.Appender, newStore func() storepb.StoreServer) { - baseT := timestamp.FromTime(time.Now().AddDate(0, 0, -2)) / 1000 * 1000 - - offset := int64(2*math.MaxUint16 + 5) - for i := int64(0); i < offset; i++ { - _, err := appender.Append(0, labels.FromStrings("a", "b", "region", "eu-west"), baseT+i, 1) - testutil.Ok(t, err) - } - - testutil.Ok(t, appender.Commit()) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - client := newStore() - srv := newStoreSeriesServer(ctx) - - testutil.Ok(t, client.Series(&storepb.SeriesRequest{ - MinTime: baseT, - MaxTime: baseT + offset, - Matchers: []storepb.LabelMatcher{ - {Type: 
storepb.LabelMatcher_EQ, Name: "a", Value: "b"}, - {Type: storepb.LabelMatcher_EQ, Name: "region", Value: "eu-west"}, - }, - }, srv)) - - testutil.Equals(t, 1, len(srv.SeriesSet)) - - firstSeries := srv.SeriesSet[0] - - testutil.Equals(t, []labelpb.ZLabel{ - {Name: "a", Value: "b"}, - {Name: "region", Value: "eu-west"}, - }, firstSeries.Labels) - - testutil.Equals(t, 1093, len(firstSeries.Chunks)) - - chunk, err := chunkenc.FromData(chunkenc.EncXOR, firstSeries.Chunks[0].Raw.Data) - testutil.Ok(t, err) - testutil.Equals(t, 120, chunk.NumSamples()) - - chunk, err = chunkenc.FromData(chunkenc.EncXOR, firstSeries.Chunks[1].Raw.Data) - testutil.Ok(t, err) - testutil.Equals(t, 120, chunk.NumSamples()) - - chunk, err = chunkenc.FromData(chunkenc.EncXOR, firstSeries.Chunks[len(firstSeries.Chunks)-1].Raw.Data) - testutil.Ok(t, err) - testutil.Equals(t, 35, chunk.NumSamples()) -} - -// Regression test for https://github.com/thanos-io/thanos/issues/396. -func TestPrometheusStore_Series_SplitSamplesIntoChunksWithMaxSizeOf120(t *testing.T) { - defer custom.TolerantVerifyLeak(t) - - p, err := e2eutil.NewPrometheus() - testutil.Ok(t, err) - defer func() { testutil.Ok(t, p.Stop()) }() - - testSeries_SplitSamplesIntoChunksWithMaxSizeOf120(t, p.Appender(), func() storepb.StoreServer { - testutil.Ok(t, p.Start(context.Background(), log.NewNopLogger())) - - u, err := url.Parse(fmt.Sprintf("http://%s", p.Addr())) - testutil.Ok(t, err) - - proxy, err := NewPrometheusStore(nil, nil, promclient.NewDefaultClient(), u, component.Sidecar, - func() labels.Labels { return labels.FromStrings("region", "eu-west") }, - func() (int64, int64) { return 0, math.MaxInt64 }, - nil) - testutil.Ok(t, err) - - // We build chunks only for SAMPLES method. Make sure we ask for SAMPLES only. 
- proxy.remoteReadAcceptableResponses = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES} - - return proxy - }) -} diff --git a/pkg/store/storepb/prompb/custom.go b/pkg/store/storepb/prompb/custom.go index fb3b395a9a..5619977da9 100644 --- a/pkg/store/storepb/prompb/custom.go +++ b/pkg/store/storepb/prompb/custom.go @@ -3,7 +3,19 @@ package prompb +import ( + "github.com/prometheus/prometheus/model/histogram" +) + func (h Histogram) IsFloatHistogram() bool { _, ok := h.GetCount().(*Histogram_CountFloat) return ok } + +func FromProtoHistogram(h Histogram) *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return FloatHistogramProtoToFloatHistogram(h) + } else { + return HistogramProtoToFloatHistogram(h) + } +} diff --git a/pkg/store/tsdb_test.go b/pkg/store/tsdb_test.go index 6dcc033c1c..a7066dca3b 100644 --- a/pkg/store/tsdb_test.go +++ b/pkg/store/tsdb_test.go @@ -228,20 +228,6 @@ func TestTSDBStore_Series(t *testing.T) { } } -// Regression test for https://github.com/thanos-io/thanos/issues/1038. 
-func TestTSDBStore_Series_SplitSamplesIntoChunksWithMaxSizeOf120(t *testing.T) { - defer custom.TolerantVerifyLeak(t) - - db, err := e2eutil.NewTSDB() - defer func() { testutil.Ok(t, db.Close()) }() - testutil.Ok(t, err) - - testSeries_SplitSamplesIntoChunksWithMaxSizeOf120(t, db.Appender(context.Background()), func() storepb.StoreServer { - return NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west")) - - }) -} - type delegatorServer struct { *storetestutil.SeriesServer diff --git a/pkg/tenancy/tenancy.go b/pkg/tenancy/tenancy.go index f8b54bcc48..aec0bad86a 100644 --- a/pkg/tenancy/tenancy.go +++ b/pkg/tenancy/tenancy.go @@ -8,9 +8,11 @@ import ( "net/http" "path" - "google.golang.org/grpc/metadata" - "github.com/pkg/errors" + "github.com/prometheus-community/prom-label-proxy/injectproxy" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "google.golang.org/grpc/metadata" ) type contextKey int @@ -136,3 +138,97 @@ func GetTenantFromGRPCMetadata(ctx context.Context) (string, bool) { } return md.Get(DefaultTenantHeader)[0], true } + +func EnforceQueryTenancy(tenantLabel string, tenant string, query string) (string, error) { + labelMatcher := &labels.Matcher{ + Name: tenantLabel, + Type: labels.MatchEqual, + Value: tenant, + } + + e := injectproxy.NewEnforcer(false, labelMatcher) + + expr, err := parser.ParseExpr(query) + if err != nil { + return "", errors.Wrap(err, "error parsing query string, when enforcing tenenacy") + } + + if err := e.EnforceNode(expr); err != nil { + return "", errors.Wrap(err, "error enforcing label") + } + + return expr.String(), nil +} + +func getLabelMatchers(formMatchers []string, tenant string, enforceTenancy bool, tenantLabel string) ([][]*labels.Matcher, error) { + tenantLabelMatcher := &labels.Matcher{ + Name: tenantLabel, + Type: labels.MatchEqual, + Value: tenant, + } + + matcherSets := make([][]*labels.Matcher, 0, len(formMatchers)) + + // If tenancy is 
enforced, but there are no matchers at all, add the tenant matcher + if len(formMatchers) == 0 && enforceTenancy { + var matcher []*labels.Matcher + matcher = append(matcher, tenantLabelMatcher) + matcherSets = append(matcherSets, matcher) + return matcherSets, nil + } + + for _, s := range formMatchers { + matchers, err := parser.ParseMetricSelector(s) + if err != nil { + return nil, err + } + + if enforceTenancy { + e := injectproxy.NewEnforcer(false, tenantLabelMatcher) + matchers, err = e.EnforceMatchers(matchers) + if err != nil { + return nil, err + } + } + + matcherSets = append(matcherSets, matchers) + } + + return matcherSets, nil +} + +// This function will: +// - Get tenant from HTTP header and add it to context. +// - if tenancy is enforced, add a tenant matcher to the promQL expression. +func RewritePromQL(ctx context.Context, r *http.Request, tenantHeader string, defaultTenantID string, certTenantField string, enforceTenancy bool, tenantLabel string, queryStr string) (string, string, context.Context, error) { + tenant, err := GetTenantFromHTTP(r, tenantHeader, defaultTenantID, certTenantField) + if err != nil { + return "", "", ctx, err + } + ctx = context.WithValue(ctx, TenantKey, tenant) + + if enforceTenancy { + queryStr, err = EnforceQueryTenancy(tenantLabel, tenant, queryStr) + return queryStr, tenant, ctx, err + } + return queryStr, tenant, ctx, nil +} + +// This function will: +// - Get tenant from HTTP header and add it to context. +// - Parse all labels matchers provided. +// - If tenancy is enforced, make sure a tenant matcher is present. 
+func RewriteLabelMatchers(ctx context.Context, r *http.Request, tenantHeader string, defaultTenantID string, certTenantField string, enforceTenancy bool, tenantLabel string, formMatchers []string) ([][]*labels.Matcher, context.Context, error) { + tenant, err := GetTenantFromHTTP(r, tenantHeader, defaultTenantID, certTenantField) + if err != nil { + return nil, ctx, err + } + ctx = context.WithValue(ctx, TenantKey, tenant) + + matcherSets, err := getLabelMatchers(formMatchers, tenant, enforceTenancy, tenantLabel) + if err != nil { + return nil, ctx, err + } + + return matcherSets, ctx, nil +} diff --git a/pkg/ui/react-app/src/pages/graph/Panel.test.tsx b/pkg/ui/react-app/src/pages/graph/Panel.test.tsx index 993c60b1a4..0837513997 100644 --- a/pkg/ui/react-app/src/pages/graph/Panel.test.tsx +++ b/pkg/ui/react-app/src/pages/graph/Panel.test.tsx @@ -26,6 +26,9 @@ const defaultProps: PanelProps = { analyze: false, disableAnalyzeCheckbox: false, }, + onUsePartialResponseChange: (): void => { + // Do nothing. + }, onOptionsChanged: (): void => { // Do nothing. 
}, @@ -47,6 +50,7 @@ const defaultProps: PanelProps = { enableHighlighting: true, enableLinter: true, defaultEngine: 'prometheus', + usePartialResponse: true, }; describe('Panel', () => { diff --git a/pkg/ui/react-app/src/pages/graph/Panel.tsx b/pkg/ui/react-app/src/pages/graph/Panel.tsx index fb2bd59b75..915d5d1813 100644 --- a/pkg/ui/react-app/src/pages/graph/Panel.tsx +++ b/pkg/ui/react-app/src/pages/graph/Panel.tsx @@ -43,9 +43,11 @@ export interface PanelProps { stores: Store[]; enableAutocomplete: boolean; enableHighlighting: boolean; + usePartialResponse: boolean; enableLinter: boolean; defaultStep: string; defaultEngine: string; + onUsePartialResponseChange: (value: boolean) => void; } interface PanelState { @@ -93,7 +95,7 @@ export const PanelDefaultOptions: PanelOptions = { maxSourceResolution: '0s', useDeduplication: true, forceTracing: false, - usePartialResponse: false, + usePartialResponse: true, storeMatches: [], engine: '', analyze: false, @@ -166,6 +168,13 @@ class Panel extends Component { componentDidMount(): void { this.executeQuery(); + const storedValue = localStorage.getItem('usePartialResponse'); + if (storedValue !== null) { + // Set the default value in state and local storage + this.setOptions({ usePartialResponse: true }); + this.props.onUsePartialResponseChange(true); + localStorage.setItem('usePartialResponse', JSON.stringify(true)); + } } executeQuery = (): void => { @@ -231,6 +240,7 @@ class Panel extends Component { method: 'GET', headers: { 'Content-Type': 'application/json', + 'X-Thanos-Force-Tracing': 'true', // Conditionally add the header if the checkbox is enabled ...(this.props.options.forceTracing ? 
{ 'X-Thanos-Force-Tracing': 'true' } : {}), }, @@ -238,8 +248,15 @@ class Panel extends Component { credentials: 'same-origin', signal: abortController.signal, }) - .then((resp) => resp.json()) - .then((json) => { + .then((resp) => { + return resp.json().then((json) => { + return { + json, + headers: resp.headers, + }; + }); + }) + .then(({ json, headers }) => { if (json.status !== 'success') { throw new Error(json.error || 'invalid response JSON'); } @@ -254,7 +271,7 @@ class Panel extends Component { } analysis = json.data.analysis; } - + const traceID = headers.get('X-Thanos-Trace-ID'); this.setState({ error: null, data: json.data, @@ -262,12 +279,14 @@ class Panel extends Component { startTime, endTime, resolution, + traceID: traceID ? traceID : '', }, warnings: json.warnings, stats: { loadTime: Date.now() - queryStart, resolution, resultSeries, + traceID, }, loading: false, analysis: analysis, @@ -336,7 +355,17 @@ class Panel extends Component { }; handleChangePartialResponse = (event: React.ChangeEvent): void => { - this.setOptions({ usePartialResponse: event.target.checked }); + let newValue = event.target.checked; + + const storedValue = localStorage.getItem('usePartialResponse'); + + if (storedValue === 'true') { + newValue = true; + } + this.setOptions({ usePartialResponse: newValue }); + this.props.onUsePartialResponseChange(newValue); + + localStorage.setItem('usePartialResponse', JSON.stringify(event.target.checked)); }; handleStoreMatchChange = (selectedStores: any): void => { @@ -510,7 +539,7 @@ class Panel extends Component { wrapperStyles={{ marginLeft: 20, display: 'inline-block' }} id={`force-tracing-checkbox-${id}`} onChange={this.handleChangeForceTracing} - defaultchecked={options.forceTracing} + defaultChecked={options.forceTracing} > Force Tracing diff --git a/pkg/ui/react-app/src/pages/graph/PanelList.tsx b/pkg/ui/react-app/src/pages/graph/PanelList.tsx index 1cc66ddbdd..19fb9e1319 100644 --- a/pkg/ui/react-app/src/pages/graph/PanelList.tsx 
+++ b/pkg/ui/react-app/src/pages/graph/PanelList.tsx @@ -31,6 +31,7 @@ interface PanelListProps extends PathPrefixProps, RouteComponentProps { enableLinter: boolean; defaultStep: string; defaultEngine: string; + usePartialResponse: boolean; } export const PanelListContent: FC = ({ @@ -44,6 +45,7 @@ export const PanelListContent: FC = ({ enableLinter, defaultStep, defaultEngine, + usePartialResponse, ...rest }) => { const [panels, setPanels] = useState(rest.panels); @@ -95,6 +97,9 @@ export const PanelListContent: FC = ({ }, ]); }; + const handleUsePartialResponseChange = (value: boolean): void => { + localStorage.setItem('usePartialResponse', JSON.stringify(value)); + }; return ( <> @@ -128,6 +133,8 @@ export const PanelListContent: FC = ({ defaultEngine={defaultEngine} enableLinter={enableLinter} defaultStep={defaultStep} + usePartialResponse={usePartialResponse} + onUsePartialResponseChange={handleUsePartialResponseChange} /> ))} + diff --git a/website/layouts/partials/versioning/version-picker.html b/website/layouts/partials/versioning/version-picker.html index 732afd453d..f2f4fa9c1e 100644 --- a/website/layouts/partials/versioning/version-picker.html +++ b/website/layouts/partials/versioning/version-picker.html @@ -4,6 +4,7 @@ {{- range .Site.Sections.Reverse }} {{- $version := .Section }} {{- if eq $version "blog" }}{{continue}}{{end}} + {{- if eq $version "support" }}{{continue}}{{end}} {{ $version }} diff --git a/website/layouts/support/list.html b/website/layouts/support/list.html new file mode 100644 index 0000000000..92111bf3a0 --- /dev/null +++ b/website/layouts/support/list.html @@ -0,0 +1,31 @@ +{{ define "main" }} +
+
+
+ {{ range .Paginator.Pages }} +
+

Support and Training

+

Firms that offer consultancy and enterprise support.

+ +
+ {{ .Summary }} +
+ + {{ end}} + {{ template "_internal/pagination.html" . }} +
+
+
+ {{ end }} \ No newline at end of file diff --git a/website/layouts/support/single.html b/website/layouts/support/single.html new file mode 100644 index 0000000000..f0a955227e --- /dev/null +++ b/website/layouts/support/single.html @@ -0,0 +1,14 @@ +{{ define "main" }} +
+
+
+
+

{{ .Title }}

+

+
+ {{ .Content }} +
+
+
+
+ {{ end }} \ No newline at end of file diff --git a/website/static/cloudraft.png b/website/static/cloudraft.png new file mode 100644 index 0000000000..c76ca7987f Binary files /dev/null and b/website/static/cloudraft.png differ diff --git a/website/static/logos/blinkit.png b/website/static/logos/blinkit.png new file mode 100644 index 0000000000..22e2add1af Binary files /dev/null and b/website/static/logos/blinkit.png differ diff --git a/website/static/logos/grofers.png b/website/static/logos/grofers.png deleted file mode 100644 index f73d67cd1b..0000000000 Binary files a/website/static/logos/grofers.png and /dev/null differ diff --git a/website/static/o11y.svg b/website/static/o11y.svg new file mode 100644 index 0000000000..2163d8cf1b --- /dev/null +++ b/website/static/o11y.svg @@ -0,0 +1,37 @@ + + + + + + + + + + + + diff --git a/website/static/tasrie-it-services.png b/website/static/tasrie-it-services.png new file mode 100644 index 0000000000..3c998ae9ab Binary files /dev/null and b/website/static/tasrie-it-services.png differ